code stringlengths 3 1.18M | language stringclasses 1
value |
|---|---|
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.content.crosswalk;
/**
* Something went wrong inside the crosswalk, not necessarily caused by
* the input or state (although it could be an incorrectly handled pathological
* case). Most likely caused by a configuration problem. It deserves its own
* exception because many crosswalks are configuration-driven (e.g. the XSLT
* crosswalks) so configuration errors are likely to be common enough that
* they ought to be easy to identify and debug.
*
* @author Larry Stone
* @version $Revision: 5844 $
*/
public class CrosswalkObjectNotSupported extends CrosswalkException
{
    // Explicit serialVersionUID: exceptions are Serializable, and relying on
    // the compiler-generated UID makes the serialized form fragile.
    private static final long serialVersionUID = 1L;

    /**
     * @param s detail message describing why the object is unsupported.
     */
    public CrosswalkObjectNotSupported(String s)
    {
        super(s);
    }

    /**
     * @param arg0 detail message.
     * @param arg1 underlying cause of this exception.
     */
    public CrosswalkObjectNotSupported(String arg0, Throwable arg1)
    {
        super(arg0, arg1);
    }

    /**
     * @param arg0 underlying cause of this exception.
     */
    public CrosswalkObjectNotSupported(Throwable arg0)
    {
        super(arg0);
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.content.crosswalk;
import org.apache.log4j.Logger;
import org.dspace.authorize.AuthorizeException;
import org.dspace.content.DCValue;
import org.dspace.content.DSpaceObject;
import org.dspace.content.Item;
import org.dspace.core.ConfigurationManager;
import org.dspace.core.Constants;
import org.dspace.core.SelfNamedPlugin;
import org.jdom.Element;
import org.jdom.Namespace;
import org.jdom.Verifier;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.sql.SQLException;
import java.util.*;
/**
* Crosswalk for creating appropriate <meta> elements to appear in the
* item display page for a particular item, for improving automated processing
* of the page (e.g. by search engines). The metadata included should be as rich
* yet standards-compliant as possible.
* <P>
* The configuration file
* <code>${dspace.dir}/config/xhtml-head-item.properties</code> contains the
* relevant mappings. Note: where there is a custom qualifier for which no
* corresponding mapping exists, the crosswalk will remove the qualifier and try
* again with just the element.
* <P>
* e.g. if a field exists in the database "dc.contributor.editor", and there is
* no dc.contributor.editor property below, the mapping for "dc.contributor"
* will be used. If an element in the item metadata record does not appear in
* the configuration, it is simply ignored; the emphasis here is on exposing
* standards-compliant metadata.
* <P>
* TODO: This may usefully be extended later to work with communities and
* collections.
*
* @version $Revision: 5844 $
* @author Robert Tansley
*/
public class XHTMLHeadDisseminationCrosswalk extends SelfNamedPlugin implements
        DisseminationCrosswalk
{
    /** log4j logger */
    private static final Logger log = Logger
            .getLogger(XHTMLHeadDisseminationCrosswalk.class);

    /** Location of config file */
    private final String config = ConfigurationManager
            .getProperty("dspace.dir")
            + File.separator
            + "config"
            + File.separator
            + "crosswalks"
            + File.separator + "xhtml-head-item.properties";

    private static final String XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml";

    /**
     * Maps DSpace metadata field to name to use in XHTML head element, e.g.
     * dc.creator or dc.description.abstract
     */
    private final Map<String, String> names;

    /** Maps DSpace metadata field to scheme for that field, if any */
    private final Map<String, String> schemes;

    /** Schemas to add -- maps schema.NAME to schema URL */
    private final Map<String, String> schemaURLs;

    /**
     * Reads the field mappings from the crosswalk configuration file.
     * Each property is either "schema.NAME = URL" (a schema declaration) or
     * "schema.element[.qualifier] = metaName[,scheme]" (a field mapping).
     *
     * @throws IOException if the configuration file cannot be read
     */
    public XHTMLHeadDisseminationCrosswalk() throws IOException
    {
        names = new HashMap<String, String>();
        schemes = new HashMap<String, String>();
        schemaURLs = new HashMap<String, String>();

        // Read in configuration
        Properties crosswalkProps = new Properties();
        FileInputStream fis = new FileInputStream(config);
        try
        {
            crosswalkProps.load(fis);
        }
        finally
        {
            try
            {
                fis.close();
            }
            catch (IOException ignored)
            {
                // Best-effort close: the load has already succeeded (or its
                // failure is propagating), so there is nothing useful to do.
            }
        }

        // Properties keys loaded from a file are always Strings, so the
        // cast below is safe; propertyNames() also covers any defaults.
        Enumeration<?> e = crosswalkProps.propertyNames();
        while (e.hasMoreElements())
        {
            String prop = (String) e.nextElement();
            if (prop.startsWith("schema."))
            {
                schemaURLs.put(prop, crosswalkProps.getProperty(prop));
            }
            else
            {
                String[] s = crosswalkProps.getProperty(prop).split(",");
                if (s.length == 2)
                {
                    // Optional second component is the scheme attribute value
                    schemes.put(prop, s[1]);
                }
                if (s.length == 1 || s.length == 2)
                {
                    names.put(prop, s[0]);
                }
                else
                {
                    log.warn("Malformed parameter " + prop + " in " + config);
                }
            }
        }
    }

    /** Only Items can be disseminated by this crosswalk. */
    public boolean canDisseminate(DSpaceObject dso)
    {
        return (dso.getType() == Constants.ITEM);
    }

    /**
     * This generates a &lt;head&gt; element around the metadata; in general
     * this will probably not be used
     */
    public Element disseminateElement(DSpaceObject dso)
            throws CrosswalkException, IOException, SQLException,
            AuthorizeException
    {
        Element head = new Element("head", XHTML_NAMESPACE);
        head.addContent(disseminateList(dso));
        return head;
    }

    /**
     * Return &lt;meta&gt; (and schema &lt;link&gt;) elements that can be put
     * in the &lt;head&gt; element of an XHTML document.
     *
     * @param dso the Item to describe; any other object type is rejected.
     * @throws CrosswalkObjectNotSupported if dso is not an Item.
     */
    public List<Element> disseminateList(DSpaceObject dso) throws CrosswalkException,
            IOException, SQLException, AuthorizeException
    {
        if (dso.getType() != Constants.ITEM)
        {
            String h = dso.getHandle();
            throw new CrosswalkObjectNotSupported(
                    "Can only support items; object passed in with DB ID "
                            + dso.getID() + ", type "
                            + Constants.typeText[dso.getType()] + ", handle "
                            + (h == null ? "null" : h));
        }

        Item item = (Item) dso;
        String handle = item.getHandle();
        List<Element> metas = new ArrayList<Element>();
        DCValue[] values = item.getMetadata(Item.ANY, Item.ANY, Item.ANY, Item.ANY);

        // Add in schema URLs e.g. <link rel="schema.DC" href="...." />
        // (iterate entries to avoid a second lookup per key)
        for (Map.Entry<String, String> schema : schemaURLs.entrySet())
        {
            Element link = new Element("link", XHTML_NAMESPACE);
            link.setAttribute("rel", schema.getKey());
            link.setAttribute("href", schema.getValue());
            metas.add(link);
        }

        for (DCValue v : values)
        {
            // Work out the key for the Maps that will tell us which metadata
            // name + scheme to use
            String key = v.schema + "." + v.element
                    + (v.qualifier != null ? "." + v.qualifier : "");
            String originalKey = key; // For later error msg

            // Find appropriate metadata field name to put in element
            String name = names.get(key);

            // If we don't have a field, try removing qualifier
            if (name == null && v.qualifier != null)
            {
                key = v.schema + "." + v.element;
                name = names.get(key);
            }

            // Do not include description.provenance
            boolean provenance = "description".equals(v.element) && "provenance".equals(v.qualifier);

            if (name == null)
            {
                // Most of the time, in this crosswalk, an unrecognised
                // element is OK, so just report at DEBUG level
                if (log.isDebugEnabled())
                {
                    log.debug("No <meta> field for item "
                            + (handle == null ? String.valueOf(dso.getID())
                                    : handle) + " field " + originalKey);
                }
            }
            else if (!provenance)
            {
                Element e = new Element("meta", XHTML_NAMESPACE);
                e.setAttribute("name", name);
                if (v.value == null)
                {
                    e.setAttribute("content", "");
                }
                else
                {
                    // Check that we can output the content
                    String reason = Verifier.checkCharacterData(v.value);
                    if (reason == null)
                    {
                        // TODO: Check valid encoding?  We assume UTF-8
                        // TODO: Check escaping "<>&
                        e.setAttribute("content", v.value);
                    }
                    else
                    {
                        // Warn that we found invalid characters
                        log.warn("Invalid attribute characters in Metadata: " + reason);
                        // Strip any characters that we can, and if the result is valid, output it
                        String simpleText = v.value.replaceAll("\\p{Cntrl}", "");
                        if (Verifier.checkCharacterData(simpleText) == null)
                        {
                            e.setAttribute("content", simpleText);
                        }
                    }
                }
                if (v.language != null && !v.language.equals(""))
                {
                    e.setAttribute("lang", v.language, Namespace.XML_NAMESPACE);
                }
                String schemeAttr = schemes.get(key);
                if (schemeAttr != null)
                {
                    e.setAttribute("scheme", schemeAttr);
                }
                metas.add(e);
            }
        }

        return metas;
    }

    /** @return the single XHTML namespace this crosswalk emits into. */
    public Namespace[] getNamespaces()
    {
        return new Namespace[] {Namespace.getNamespace(XHTML_NAMESPACE)};
    }

    /** No schema location is known for plain XHTML head fragments. */
    public String getSchemaLocation()
    {
        return "";
    }

    /** The natural output of this crosswalk is a list of elements. */
    public boolean preferList()
    {
        return true;
    }

    // Plugin Methods
    public static String[] getPluginNames()
    {
        return new String[] {"XHTML_HEAD_ITEM"};
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.content.crosswalk;
import java.io.IOException;
import java.util.List;
import java.sql.SQLException;
import org.dspace.core.Context;
import org.dspace.content.DSpaceObject;
import org.dspace.authorize.AuthorizeException;
import org.jdom.Element;
import org.jdom.Namespace;
/**
* Crosswalk descriptive metadata to and from DIM (DSpace Intermediate
* Metadata) format, strictly for the purpose of including a precise and
* complete record of the DMD in an AIP. Although the DIM format was never
* intended to be used outside of DSpace, it is admirably suited to
* describing the exact state of the descriptive MD stored in the RDBMS.
* All crosswalks to standard formats such as MODS and even DC are necessarily
* "lossy" and inexact. Since the purpose of an AIP is to preserve and restore
* the state of an object exactly, DIM is the preferred format for
* recording its descriptive MD.
* <p>
* In order to allow external applications to make sense of DSpace AIPs for
* preservation purposes, we recommend adding a parallel descriptive
* metadata section in one of the preferred standard formats such as MODS
* as well as the DIM.
*
* @author Larry Stone
* @version $Revision: 1.2 $
*/
public class AIPDIMCrosswalk
    implements DisseminationCrosswalk, IngestionCrosswalk
{
    /**
     * Get XML namespaces of the elements this crosswalk may return.
     * The only namespace used is the DIM namespace.
     *
     * @return array of namespaces, which may be empty.
     */
    public Namespace[] getNamespaces()
    {
        return new Namespace[] { XSLTCrosswalk.DIM_NS };
    }

    /**
     * Get the XML Schema location(s) of the target metadata format.
     * DIM is an internal format with no published schema, so this is
     * always the empty string.
     *
     * @return SchemaLocation string, or empty string if unknown.
     */
    public String getSchemaLocation()
    {
        return "";
    }

    /**
     * Predicate: Can this disseminator crosswalk the given object.
     * DIM can record the descriptive metadata of any DSpace object.
     *
     * @param dso dspace object, e.g. an <code>Item</code>.
     * @return always true.
     */
    public boolean canDisseminate(DSpaceObject dso)
    {
        return true;
    }

    /**
     * Predicate: Does this disseminator prefer to return a list of Elements,
     * rather than a single root Element?
     * DIM has a natural root element, so a single Element is preferred.
     *
     * @return false: callers should use disseminateElement().
     */
    public boolean preferList()
    {
        return false;
    }

    /**
     * Execute crosswalk, returning List of XML elements.
     * Delegates to {@link #disseminateElement(DSpaceObject)} and returns
     * the children of the resulting DIM root element.
     *
     * @param dso the DSpace Object whose metadata to export.
     * @return results of crosswalk as list of XML elements, never null.
     *
     * @throws CrosswalkException failure of the crosswalk itself.
     * @throws IOException I/O failure in services this calls
     * @throws SQLException Database failure in services this calls
     * @throws AuthorizeException current user not authorized for this operation.
     */
    public List<Element> disseminateList(DSpaceObject dso)
        throws CrosswalkException, IOException, SQLException,
               AuthorizeException
    {
        return disseminateElement(dso).getChildren();
    }

    /**
     * Execute crosswalk, returning one XML root element as
     * a JDOM <code>Element</code> object.
     *
     * @param dso the DSpace Object whose metadata to export.
     * @return root Element of the DIM record, never <code>null</code>
     *
     * @throws CrosswalkException failure of the crosswalk itself.
     * @throws IOException I/O failure in services this calls
     * @throws SQLException Database failure in services this calls
     * @throws AuthorizeException current user not authorized for this operation.
     */
    public Element disseminateElement(DSpaceObject dso)
        throws CrosswalkException, IOException, SQLException,
               AuthorizeException
    {
        return XSLTDisseminationCrosswalk.createDIM(dso);
    }

    /**
     * Ingest a whole DIM document by handing its child elements to the
     * list-based ingest method.
     */
    public void ingest(Context context, DSpaceObject dso, Element root)
        throws CrosswalkException, IOException, SQLException, AuthorizeException
    {
        ingest(context, dso, root.getChildren());
    }

    /**
     * Ingest a list of DIM field elements; each corresponds directly to an
     * Item.addMetadata() call, so they are simply executed.
     */
    public void ingest(Context context, DSpaceObject dso, List<Element> dimList)
        throws CrosswalkException,
               IOException, SQLException, AuthorizeException
    {
        XSLTIngestionCrosswalk.ingestDIM(context, dso, dimList);
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.content.crosswalk;
import java.io.IOException;
import java.sql.SQLException;
import java.util.List;
import org.dspace.authorize.AuthorizeException;
import org.dspace.content.DSpaceObject;
import org.dspace.content.Item;
import org.dspace.core.Constants;
import org.dspace.core.Context;
import org.jdom.Element;
import org.jdom.Namespace;
/**
 * OAI Dublin Core (oai_dc) ingestion crosswalk
 * <p>
 * Processes Dublin Core metadata encased in an oai_dc:dc wrapper
*
* @author Alexey Maslov
* @version $Revision: 1 $
*/
public class OAIDCIngestionCrosswalk
    implements IngestionCrosswalk
{
    // NOTE(review): this constant is misnamed -- it holds the DIM namespace
    // URI, not Dublin Core -- and is currently unreferenced. Kept for
    // compatibility; confirm before removing.
    private static final Namespace DC_NS = Namespace.getNamespace("http://www.dspace.org/xmlns/dspace/dim");
    private static final Namespace OAI_DC_NS = Namespace.getNamespace("http://www.openarchives.org/OAI/2.0/oai_dc/");

    /**
     * Wrap the list of metadata elements in a synthetic root element and
     * delegate to the element-based ingest method.
     */
    public void ingest(Context context, DSpaceObject dso, List<Element> metadata) throws CrosswalkException, IOException, SQLException, AuthorizeException {
        // Guard against an empty record: metadata.get(0) below would throw
        // IndexOutOfBoundsException, and there is nothing to ingest anyway.
        if (metadata == null || metadata.isEmpty()) {
            return;
        }
        Element wrapper = new Element("wrap", metadata.get(0).getNamespace());
        wrapper.addContent(metadata);

        ingest(context, dso, wrapper);
    }

    /**
     * Ingest each child of root as an unqualified "dc" field on the Item.
     *
     * @throws CrosswalkObjectNotSupported if dso is not an Item.
     */
    public void ingest(Context context, DSpaceObject dso, Element root) throws CrosswalkException, IOException, SQLException, AuthorizeException {

        if (dso.getType() != Constants.ITEM)
        {
            throw new CrosswalkObjectNotSupported("OAIDCIngestionCrosswalk can only crosswalk an Item.");
        }
        Item item = (Item)dso;

        if (root == null) {
            System.err.println("The element received by ingest was null");
            return;
        }

        List<Element> metadata = root.getChildren();
        for (Element element : metadata) {
            // oai_dc elements are unqualified DC, hence the null qualifier.
            item.addMetadata("dc", element.getName(), null, null, element.getText());
        }
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.content.crosswalk;
import java.io.IOException;
import java.sql.SQLException;
import java.util.List;
import org.dspace.authorize.AuthorizeException;
import org.dspace.content.DSpaceObject;
import org.jdom.Element;
import org.jdom.Namespace;
/**
* Dissemination Crosswalk plugin -- translate DSpace native
* metadata into an external XML format.
* <p>
* This interface describes a plugin that produces metadata in an XML-based
* format from the state of a DSpace object. Note that the object
* may be an Item, Bitstream, Community, or Collection, although most
* implementations only work on one type of object.
*
* @author Larry Stone
* @version $Revision: 5844 $
*/
public interface DisseminationCrosswalk
{
    /** XSI namespace, required for xsi:schemalocation attributes */
    Namespace XSI_NS =
        Namespace.getNamespace("xsi", "http://www.w3.org/2001/XMLSchema-instance");

    /**
     * Get XML namespaces of the elements this crosswalk may return.
     * These are the namespaces (as JDOM objects) of the root element.
     *
     * @return array of namespaces, which may be empty.
     */
    Namespace[] getNamespaces();

    /**
     * Get the XML Schema location(s) of the target metadata format,
     * i.e. the string value for the <code>xsi:schemaLocation</code>
     * attribute of the generated XML.
     * <p>
     * An empty string is permitted when no schema is known, but
     * implementors are strongly encouraged to supply one so the output
     * XML can be validated correctly.
     *
     * @return SchemaLocation string (URI namespace, whitespace, URI of the
     * XML schema document), or empty string if unknown.
     */
    String getSchemaLocation();

    /**
     * Predicate: Can this disseminator crosswalk the given object.
     * Needed by OAI-PMH server implementation.
     *
     * @param dso dspace object, e.g. an <code>Item</code>.
     * @return true when disseminator is capable of producing metadata.
     */
    boolean canDisseminate(DSpaceObject dso);

    /**
     * Predicate: Does this disseminator prefer to return a list of Elements,
     * rather than a single root Element?
     * <p>
     * Some metadata formats (e.g. Dublin Core and Qualified Dublin Core)
     * have an XML schema without a root element, so any root a crosswalk
     * produced would belong to a nonstandard schema; such crosswalks
     * return <code>true</code>. Most implementations return
     * <code>false</code>.
     *
     * @return true when disseminator prefers you call disseminateList().
     */
    boolean preferList();

    /**
     * Execute crosswalk, returning List of XML elements.
     * The returned <code>List</code> holds JDOM <code>Element</code>s
     * representing the XML produced by the crosswalk, typically for
     * embedding in a METS document's <code>xmlData</code> field.
     * <p>
     * When there are no results, an empty list is returned, but never
     * <code>null</code>.
     *
     * @param dso the DSpace Object whose metadata to export.
     * @return results of crosswalk as list of XML elements.
     *
     * @throws CrosswalkInternalException (<code>CrosswalkException</code>) failure of the crosswalk itself.
     * @throws CrosswalkObjectNotSupported (<code>CrosswalkException</code>) Cannot crosswalk this kind of DSpace object.
     * @throws IOException I/O failure in services this calls
     * @throws SQLException Database failure in services this calls
     * @throws AuthorizeException current user not authorized for this operation.
     */
    List<Element> disseminateList(DSpaceObject dso)
        throws CrosswalkException, IOException, SQLException,
               AuthorizeException;

    /**
     * Execute crosswalk, returning one XML root element as a JDOM
     * <code>Element</code> object — typically the root element of a
     * document.
     *
     * @param dso the DSpace Object whose metadata to export.
     * @return root Element of the target metadata, never <code>null</code>
     *
     * @throws CrosswalkInternalException (<code>CrosswalkException</code>) failure of the crosswalk itself.
     * @throws CrosswalkObjectNotSupported (<code>CrosswalkException</code>) Cannot crosswalk this kind of DSpace object.
     * @throws IOException I/O failure in services this calls
     * @throws SQLException Database failure in services this calls
     * @throws AuthorizeException current user not authorized for this operation.
     */
    Element disseminateElement(DSpaceObject dso)
        throws CrosswalkException, IOException, SQLException,
               AuthorizeException;
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.content.crosswalk;
import java.io.IOException;
import java.sql.SQLException;
import java.util.List;
import org.apache.log4j.Logger;
import org.dspace.authorize.AuthorizeException;
import org.dspace.content.DSpaceObject;
import org.dspace.core.Context;
import org.jdom.Element;
import org.jdom.output.Format;
import org.jdom.output.XMLOutputter;
/**
* "Null" ingestion crosswalk
* <p>
* Use this crosswalk to ignore a metadata record on ingest. It was
* intended to be used with a package importer such as the METS
* packager, which may receive metadata records of types for which it
* hasn't got a crosswalk. The safest thing to do with these is ignore
* them. To do that, use the plugin configuration to map the name
* of the metadata type to this plugin (or within the METS ingester,
* use its metadata-name remapping configuration).
* <pre>
* # ignore LOM metadata when it comes up:
* plugin.named.org.dspace.content.crosswalk.SubmissionCrosswalk = \
* org.dspace.content.crosswalk.NullIngestionCrosswalk = NULL, LOM
* </pre>
* @author Larry Stone
* @version $Revision: 5844 $
*/
public class NullIngestionCrosswalk
    implements IngestionCrosswalk
{
    /** log4j category */
    private static Logger log = Logger.getLogger(NullIngestionCrosswalk.class);

    /** Pretty-printer used to show the discarded XML in the debug log. */
    private static XMLOutputter prettyPrinter = new XMLOutputter(Format.getPrettyFormat());

    /**
     * Intentionally discards the metadata Element; the only action is a
     * debug log entry recording what was dropped.
     */
    public void ingest(Context context, DSpaceObject dso, Element root)
        throws CrosswalkException, IOException, SQLException, AuthorizeException
    {
        // drop xml on the floor but mention what we're missing for debugging:
        String dropped = prettyPrinter.outputString(root);
        log.debug("Null crosswalk is ignoring this metadata Element: \n" + dropped);
    }

    /**
     * Intentionally discards the metadata List; the only action is a
     * debug log entry recording what was dropped.
     */
    public void ingest(Context context, DSpaceObject dso, List<Element> ml)
        throws CrosswalkException, IOException, SQLException, AuthorizeException
    {
        // drop xml on the floor but mention what we're missing for debugging:
        String dropped = prettyPrinter.outputString(ml);
        log.debug("Null crosswalk is ignoring this List of metadata: \n" + dropped);
    }

    /** Irrelevant for a crosswalk that ignores everything; returns false. */
    public boolean preferList()
    {
        return false;
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.content.crosswalk;
/**
* Superclass for more-specific crosswalk exceptions.
* Use this class in declarations and catchers to simplify code
* and allow for new exception types to be added.
*
* @author Larry Stone
* @version $Revision: 5844 $
*/
public class CrosswalkException extends Exception
{
    // Explicit serialVersionUID: Exception is Serializable, and relying on
    // the compiler-generated UID makes the serialized form fragile.
    private static final long serialVersionUID = 1L;

    /** Construct with no detail message. */
    public CrosswalkException()
    {
        super();
    }

    /**
     * @param s detail message.
     * @param t underlying cause of this exception.
     */
    public CrosswalkException(String s, Throwable t)
    {
        super(s, t);
    }

    /**
     * @param s detail message.
     */
    public CrosswalkException(String s)
    {
        super(s);
    }

    /**
     * @param t underlying cause of this exception.
     */
    public CrosswalkException(Throwable t)
    {
        super(t);
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.content.crosswalk;
import java.io.OutputStream;
import java.io.IOException;
import java.sql.SQLException;
import org.apache.log4j.Logger;
import org.dspace.authorize.AuthorizeException;
import org.dspace.content.DSpaceObject;
import org.dspace.content.Bitstream;
import org.dspace.content.Item;
import org.dspace.core.Constants;
import org.dspace.core.Context;
import org.dspace.core.Utils;
import org.dspace.license.CreativeCommons;
/**
* Export the item's Creative Commons license, RDF form.
*
* @author Larry Stone
* @version $Revision: 1.0 $
*/
public class CreativeCommonsRDFStreamDisseminationCrosswalk
    implements StreamDisseminationCrosswalk
{
    /** log4j logger */
    private static Logger log = Logger.getLogger(CreativeCommonsRDFStreamDisseminationCrosswalk.class);

    /**
     * An Item can be disseminated only when it actually carries a CC
     * license RDF bitstream.
     */
    public boolean canDisseminate(Context context, DSpaceObject dso)
    {
        try
        {
            return dso.getType() == Constants.ITEM &&
                   CreativeCommons.getLicenseRdfBitstream((Item)dso) != null;
        }
        catch (Exception e)
        {
            // Predicate must not propagate; report and answer "no".
            log.error("Failed getting CC license", e);
            return false;
        }
    }

    /**
     * Copy the item's CC license RDF bitstream to the output stream.
     * Does nothing for non-Items or items without a CC RDF license.
     * Note: closes <code>out</code> after a successful copy.
     */
    public void disseminate(Context context, DSpaceObject dso, OutputStream out)
        throws CrosswalkException, IOException, SQLException, AuthorizeException
    {
        if (dso.getType() == Constants.ITEM)
        {
            Bitstream cc = CreativeCommons.getLicenseRdfBitstream((Item)dso);
            if (cc != null)
            {
                // Close the bitstream's input stream when done; the original
                // code leaked it.
                java.io.InputStream in = cc.retrieve();
                try
                {
                    Utils.copy(in, out);
                }
                finally
                {
                    in.close();
                }
                out.close();
            }
        }
    }

    /** The CC license RDF is served as XML. */
    public String getMIMEType()
    {
        return "text/xml";
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.content.crosswalk;
/**
*
* This indicates a problem with the input metadata (for submission) or
* item state (dissemination). It is invalid or incomplete, or simply
* unsuitable to be crosswalked.
*
* @author Larry Stone
* @version $Revision: 5844 $
*/
public class MetadataValidationException extends CrosswalkException
{
    // Explicit serialVersionUID: exceptions are Serializable, and relying on
    // the compiler-generated UID makes the serialized form fragile.
    private static final long serialVersionUID = 1L;

    /** Construct with no detail message. */
    public MetadataValidationException()
    {
        super();
    }

    /**
     * @param arg0 detail message describing the validation failure.
     * @param arg1 underlying cause of this exception.
     */
    public MetadataValidationException(String arg0, Throwable arg1)
    {
        super(arg0, arg1);
    }

    /**
     * @param arg0 detail message describing the validation failure.
     */
    public MetadataValidationException(String arg0)
    {
        super(arg0);
    }

    /**
     * @param arg0 underlying cause of this exception.
     */
    public MetadataValidationException(Throwable arg0)
    {
        super(arg0);
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.content.crosswalk;
import java.io.OutputStream;
import java.io.IOException;
import java.sql.SQLException;
import org.apache.log4j.Logger;
import org.dspace.authorize.AuthorizeException;
import org.dspace.content.DSpaceObject;
import org.dspace.content.Bitstream;
import org.dspace.content.Item;
import org.dspace.content.packager.PackageUtils;
import org.dspace.core.Constants;
import org.dspace.core.Context;
import org.dspace.core.Utils;
/**
* Export the object's DSpace deposit license.
*
* @author Larry Stone
* @version $Revision: 1.0 $
*/
public class LicenseStreamDisseminationCrosswalk
    implements StreamDisseminationCrosswalk
{
    /** log4j logger */
    private static Logger log = Logger.getLogger(LicenseStreamDisseminationCrosswalk.class);

    /**
     * An Item can be disseminated only when a deposit license bitstream
     * can be found for it.
     */
    public boolean canDisseminate(Context context, DSpaceObject dso)
    {
        try
        {
            return dso.getType() == Constants.ITEM &&
                   PackageUtils.findDepositLicense(context, (Item)dso) != null;
        }
        catch (Exception e)
        {
            // Predicate must not propagate; report and answer "no".
            log.error("Failed getting Deposit license", e);
            return false;
        }
    }

    /**
     * Copy the item's deposit license bitstream to the output stream.
     * Does nothing for non-Items or items without a deposit license.
     * Note: closes <code>out</code> after a successful copy.
     */
    public void disseminate(Context context, DSpaceObject dso, OutputStream out)
        throws CrosswalkException, IOException, SQLException, AuthorizeException
    {
        if (dso.getType() == Constants.ITEM)
        {
            Bitstream licenseBs = PackageUtils.findDepositLicense(context, (Item)dso);

            if (licenseBs != null)
            {
                // Close the bitstream's input stream when done; the original
                // code leaked it.
                java.io.InputStream in = licenseBs.retrieve();
                try
                {
                    Utils.copy(in, out);
                }
                finally
                {
                    in.close();
                }
                out.close();
            }
        }
    }

    /** Deposit licenses are stored as plain text. */
    public String getMIMEType()
    {
        return "text/plain";
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.content.crosswalk;
import java.io.IOException;
import java.net.URLEncoder;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.lang.ArrayUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.log4j.Logger;
import org.dspace.authorize.AuthorizeException;
import org.dspace.content.Bitstream;
import org.dspace.content.BitstreamFormat;
import org.dspace.content.Bundle;
import org.dspace.content.DSpaceObject;
import org.dspace.content.FormatIdentifier;
import org.dspace.content.Item;
import org.dspace.core.ConfigurationManager;
import org.dspace.core.Constants;
import org.dspace.core.Context;
import org.jdom.Element;
import org.jdom.Namespace;
/**
* PREMIS Crosswalk
* <p>
* Translate between DSpace Bitstream properties and PREMIS metadata format
* (see <a href="http://www.oclc.org/research/projects/pmwg/">
* http://www.oclc.org/research/projects/pmwg/</a> for details).
* This is intended to implement the requirements of the DSpace METS SIP
* specification for both ingest and dissemination.
*
* @author Larry Stone
* @version $Revision: 5844 $
*/
public class PREMISCrosswalk
implements IngestionCrosswalk, DisseminationCrosswalk
{
/** log4j category */
private static Logger log = Logger.getLogger(PREMISCrosswalk.class);
private static final Namespace PREMIS_NS =
Namespace.getNamespace("premis", "http://www.loc.gov/standards/premis");
// XML schemaLocation fragment for this crosswalk, from config.
private String schemaLocation =
PREMIS_NS.getURI()+" http://www.loc.gov/standards/premis/PREMIS-v1-0.xsd";
private static final Namespace namespaces[] = { PREMIS_NS };
/*----------- Submission functions -------------------*/
public void ingest(Context context, DSpaceObject dso, Element root)
throws CrosswalkException, IOException, SQLException, AuthorizeException
{
if (!(root.getName().equals("premis")))
{
throw new MetadataValidationException("Wrong root element for PREMIS: " + root.toString());
}
ingest(context, dso, root.getChildren());
}
    /**
     * Ingest a list of PREMIS metadata elements into a Bitstream.
     * <p>
     * Applies {@code originalName} as the bitstream's new name, validates the
     * stored bitstream against the declared size and checksum, and reconciles
     * the bitstream format from formatDesignation/formatName (a MIME type) or,
     * failing that, by guessing from the (possibly just-renamed) file name.
     *
     * @param context current DSpace context
     * @param dso target object; must be a Bitstream
     * @param ml PREMIS metadata elements; a nested premis wrapper is recursed into
     * @throws CrosswalkObjectNotSupported if dso is not a Bitstream
     * @throws MetadataValidationException if size or checksum disagree with the bitstream
     */
    public void ingest(Context context, DSpaceObject dso, List<Element> ml)
        throws CrosswalkException, IOException, SQLException, AuthorizeException
    {
        // we only understand how to crosswalk PREMIS to a Bitstream.
        if (dso.getType() != Constants.BITSTREAM)
        {
            throw new CrosswalkObjectNotSupported("Wrong target object type, PREMISCrosswalk can only crosswalk to a Bitstream.");
        }
        Bitstream bitstream = (Bitstream)dso;
        String MIMEType = null;
        String bsName = null;
        for (Element me : ml)
        {
            if (me.getName().equals("premis"))
            {
                // if we're fed a <premis> wrapper object, recurse on its guts:
                ingest(context, dso, me.getChildren());
            }
            else if (me.getName().equals("object"))
            {
                // "object" section:
                // originalName becomes new bitstream source and (default) name
                // NOTE(review): only setName() is actually called below; the
                // bitstream source is never updated despite this comment.
                Element on = me.getChild("originalName", PREMIS_NS);
                if (on != null)
                {
                    bsName = on.getTextTrim();
                }
                // Reconcile technical metadata with bitstream content;
                // check that length and message digest (checksum) match.
                // XXX FIXME: wait for Checksum Checker code to add better test.
                Element oc = me.getChild("objectCharacteristics", PREMIS_NS);
                if (oc != null)
                {
                    // Declared size, if present, must match the stored bitstream exactly.
                    String ssize = oc.getChildTextTrim("size", PREMIS_NS);
                    if (ssize != null)
                    {
                        try
                        {
                            int size = Integer.parseInt(ssize);
                            if (bitstream.getSize() != size)
                            {
                                throw new MetadataValidationException(
                                    "Bitstream size (" + String.valueOf(bitstream.getSize()) +
                                    ") does not match size in PREMIS (" + ssize + "), rejecting it.");
                            }
                        }
                        catch (NumberFormatException ne)
                        {
                            throw new MetadataValidationException("Bad number value in PREMIS object/objectCharacteristics/size: "+ssize, ne);
                        }
                    }
                    // Checksums are only comparable when the digest algorithms agree;
                    // otherwise we can only log a warning and skip the check.
                    Element fixity = oc.getChild("fixity", PREMIS_NS);
                    if (fixity != null)
                    {
                        String alg = fixity.getChildTextTrim("messageDigestAlgorithm", PREMIS_NS);
                        String md = fixity.getChildTextTrim("messageDigest", PREMIS_NS);
                        String b_alg = bitstream.getChecksumAlgorithm();
                        String b_md = bitstream.getChecksum();
                        if (StringUtils.equals(alg, b_alg))
                        {
                            if (StringUtils.equals(md, b_md))
                            {
                                log.debug("Bitstream checksum agrees with PREMIS: " + bitstream.getName());
                            }
                            else
                            {
                                throw new MetadataValidationException("Bitstream " + alg + " Checksum does not match value in PREMIS (" + b_md + " != " + md + "), for bitstream: " + bitstream.getName());
                            }
                        }
                        else
                        {
                            log.warn("Cannot test checksum on bitstream=" + bitstream.getName() +
                                ", algorithm in PREMIS is different: " + alg);
                        }
                    }
                    // Look for formatDesignation/formatName, which is
                    // MIME Type. Match with DSpace bitstream format.
                    Element format = oc.getChild("format", PREMIS_NS);
                    if (format != null)
                    {
                        Element fd = format.getChild("formatDesignation", PREMIS_NS);
                        if (fd != null)
                        {
                            MIMEType = fd.getChildTextTrim("formatName", PREMIS_NS);
                        }
                    }
                }
                // Apply new bitstream name if we found it.
                if (bsName != null)
                {
                    bitstream.setName(bsName);
                    log.debug("Changing bitstream id="+String.valueOf(bitstream.getID())+"name and source to: "+bsName);
                }
                // reconcile bitstream format; if there's a MIMEtype,
                // get it from that, otherwise try to divine from file extension
                // (guessFormat() looks at bitstream Name, which we just set)
                BitstreamFormat bf = (MIMEType == null) ? null :
                    BitstreamFormat.findByMIMEType(context, MIMEType);
                if (bf == null)
                {
                    bf = FormatIdentifier.guessFormat(context, bitstream);
                }
                if (bf != null)
                {
                    bitstream.setFormat(bf);
                }
            }
            else
            {
                log.debug("Skipping element: " + me.toString());
            }
        }
        // Persist all changes made above.
        bitstream.update();
    }
/*----------- Dissemination functions -------------------*/
public Namespace[] getNamespaces()
{
return (Namespace[]) ArrayUtils.clone(namespaces);
}
public String getSchemaLocation()
{
return schemaLocation;
}
public boolean canDisseminate(DSpaceObject dso)
{
//PREMISCrosswalk can only crosswalk a Bitstream
return (dso.getType() == Constants.BITSTREAM);
}
public Element disseminateElement(DSpaceObject dso)
throws CrosswalkException,
IOException, SQLException, AuthorizeException
{
if (dso.getType() != Constants.BITSTREAM)
{
throw new CrosswalkObjectNotSupported("PREMISCrosswalk can only crosswalk a Bitstream.");
}
Bitstream bitstream = (Bitstream)dso;
Element premis = new Element("premis", PREMIS_NS);
Element object = new Element("object", PREMIS_NS);
premis.addContent(object);
// objectIdentifier is required
Element oid = new Element("objectIdentifier", PREMIS_NS);
Element oit = new Element("objectIdentifierType", PREMIS_NS);
oit.setText("URL");
oid.addContent(oit);
Element oiv = new Element("objectIdentifierValue", PREMIS_NS);
// objectIdentifier value: by preference, if available:
// a. DSpace "persistent" URL to bitstream, if components available.
// b. name of bitstream, if any
// c. made-up name based on sequence ID and extension.
String sid = String.valueOf(bitstream.getSequenceID());
String baseUrl = ConfigurationManager.getProperty("dspace.url");
String handle = null;
// get handle of parent Item of this bitstream, if there is one:
Bundle[] bn = bitstream.getBundles();
if (bn.length > 0)
{
Item bi[] = bn[0].getItems();
if (bi.length > 0)
{
handle = bi[0].getHandle();
}
}
// get or make up name for bitstream:
String bsName = bitstream.getName();
if (bsName == null)
{
String ext[] = bitstream.getFormat().getExtensions();
bsName = "bitstream_"+sid+ (ext.length > 0 ? ext[0] : "");
}
if (handle != null && baseUrl != null)
{
oiv.setText(baseUrl
+ "/bitstream/"
+ URLEncoder.encode(handle, "UTF-8")
+ "/"
+ sid
+ "/"
+ URLEncoder.encode(bsName, "UTF-8"));
}
else
{
oiv.setText(URLEncoder.encode(bsName, "UTF-8"));
}
oid.addContent(oiv);
object.addContent(oid);
// objectCategory is fixed value, "File".
Element oc = new Element("objectCategory", PREMIS_NS);
oc.setText("File");
object.addContent(oc);
Element ochar = new Element("objectCharacteristics", PREMIS_NS);
object.addContent(ochar);
// checksum if available
String cks = bitstream.getChecksum();
String cka = bitstream.getChecksumAlgorithm();
if (cks != null && cka != null)
{
Element fixity = new Element("fixity", PREMIS_NS);
Element mda = new Element("messageDigestAlgorithm", PREMIS_NS);
mda.setText(cka);
fixity.addContent(mda);
Element md = new Element("messageDigest", PREMIS_NS);
md.setText(cks);
fixity.addContent(md);
ochar.addContent(fixity);
}
// size
Element size = new Element("size", PREMIS_NS);
size.setText(String.valueOf(bitstream.getSize()));
ochar.addContent(size);
// Punt and set formatName to the MIME type; the best we can
// do for now in the absence of any usable global format registries.
// objectCharacteristics/format/formatDesignation/
// formatName <- MIME Type
//
Element format = new Element("format", PREMIS_NS);
Element formatDes = new Element("formatDesignation", PREMIS_NS);
Element formatName = new Element("formatName", PREMIS_NS);
formatName.setText(bitstream.getFormat().getMIMEType());
formatDes.addContent(formatName);
format.addContent(formatDes);
ochar.addContent(format);
// originalName <- name (or source if none)
String oname = bitstream.getName();
if (oname == null)
{
oname = bitstream.getSource();
}
if (oname != null)
{
Element on = new Element("originalName", PREMIS_NS);
on.setText(oname);
object.addContent(on);
}
return premis;
}
public List<Element> disseminateList(DSpaceObject dso)
throws CrosswalkException,
IOException, SQLException, AuthorizeException
{
List<Element> result = new ArrayList<Element>(1);
result.add(disseminateElement(dso));
return result;
}
public boolean preferList()
{
return false;
}
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.content.crosswalk;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.io.StringReader;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.log4j.Logger;
import org.dspace.authorize.AuthorizeException;
import org.dspace.content.DCValue;
import org.dspace.content.DSpaceObject;
import org.dspace.content.Item;
import org.dspace.core.ConfigurationManager;
import org.dspace.core.Constants;
import org.dspace.core.SelfNamedPlugin;
import org.jdom.Attribute;
import org.jdom.Element;
import org.jdom.Namespace;
import org.jdom.input.SAXBuilder;
import proj.oceandocs.utils.utilsXML;
/**
* Configurable AGRIS Crosswalk
* <p>
* This class supports multiple dissemination crosswalks from DSpace
* internal data to the AGRIS XML format
* (see <a href="http://www.fao.org/agris/tools/AGRIS_AP/WhatItIs.htm">http://www.fao.org/agris/tools/AGRIS_AP/WhatItIs.htm</a>.)
* <p>
* It registers multiple Plugin names, which it reads from
* the DSpace configuration as follows:
*
* <h3>Configuration</h3>
 * Every key starting with <code>"crosswalk.agris.properties."</code> describes an
 * AGRIS crosswalk. Everything after the last period is the <em>plugin name</em>,
 * and the value is the pathname (relative to <code><em>dspace.dir</em>/config</code>)
 * of the crosswalk configuration file.
 * <p>
 * You can have two names point to the same crosswalk,
 * just add two configuration entries with the same value, e.g.
 * <pre>
 *    crosswalk.agris.properties.AGRIS = crosswalks/agris.properties
 *    crosswalk.agris.properties.default = crosswalks/agris.properties
 * </pre>
 * The first line creates a plugin with the name <code>"AGRIS"</code>
 * which is configured from the file <em>dspace-dir</em><code>/config/crosswalks/agris.properties</code>.
* <p>
* Since there is significant overhead in reading the properties file to
* configure the crosswalk, and a crosswalk instance may be used any number
* of times, we recommend caching one instance of the crosswalk for each
* name and simply reusing those instances. The PluginManager does this
* by default.
*/
public class AGRISDisseminationCrosswalk extends SelfNamedPlugin implements DisseminationCrosswalk
{
    /** log4j category */
    private static Logger log = Logger.getLogger(AGRISDisseminationCrosswalk.class);

    /**
     * DSpace configuration key prefix. Everything after this prefix in a key
     * is the plugin alias; the value is the path of that alias's mapping
     * properties file, relative to {@code dspace.dir/config}.
     */
    private final static String CONFIG_PREFIX = "crosswalk.agris.properties.";

    /** Plugin alias table, filled from DSpace configuration entries. */
    private static String aliases[] = null;

    /** Country and institute codes used to generate ARN (AGRIS Record Number) values. */
    private static String ARNcountry = "";
    private static String ARNinstitute = "";

    /** Namespaces used by the AGRIS AP output format. */
    public static final Namespace AGS_NS =
        Namespace.getNamespace("ags", "http://purl.org/agmes/1.1/");
    public static final Namespace AGLS_NS =
        Namespace.getNamespace("agls", "http://www.naa.gov.au/recordkeeping/gov_online/agls/1.2");
    public static final Namespace DC_NS =
        Namespace.getNamespace("dc", "http://purl.org/dc/elements/1.1/");
    public static final Namespace DCTERMS_NS =
        Namespace.getNamespace("dcterms", "http://purl.org/dc/terms/");

    private static final Namespace namespaces[] =
    {
        AGS_NS, AGLS_NS, DC_NS, DCTERMS_NS
    };

    /** Shared parser for the XML fragments produced from mapping templates. */
    private static SAXBuilder builder = new SAXBuilder();

    /** Mapping from qualified DC field name to XML-fragment template (insertion order preserved). */
    private LinkedHashMap<String, String> agrisMap = null;

    /** Optional per-field grouping limit (second "|"-separated part of a mapping line). */
    private Map<String, String> groupingLimits = null;

    /**
     * Pattern recognizing {@code $schema.element.qualifier|X$} placeholders,
     * where X selects which part of the referenced field is substituted:
     * s = value, a = authority, l = language.
     * FIX: dots are now escaped (previously they matched any character) and the
     * character class no longer accidentally admits ',' ([s,a,l] -> [sal]).
     */
    private static final Pattern SUBST_PATTERN =
        Pattern.compile("\\$(\\w+\\.\\w+\\.\\w+)\\|([sal])\\$", Pattern.CASE_INSENSITIVE);

    static
    {
        // Harvest plugin aliases from every configuration key with our prefix.
        List<String> aliasList = new ArrayList<String>();
        Enumeration pe = ConfigurationManager.propertyNames();
        while (pe.hasMoreElements())
        {
            String key = (String) pe.nextElement();
            if (key.startsWith(CONFIG_PREFIX))
            {
                aliasList.add(key.substring(CONFIG_PREFIX.length()));
            }
        }
        aliases = aliasList.toArray(new String[aliasList.size()]);

        // Get some parameters to generate the ARN number later.
        // NOTE(review): "ARN.coutrycode" looks like a typo for "ARN.countrycode";
        // it must match whatever key is actually used in the DSpace configuration.
        ARNcountry = ConfigurationManager.getProperty("ARN.coutrycode");
        ARNinstitute = ConfigurationManager.getProperty("ARN.institutecode");
    }

    public static String[] getPluginNames()
    {
        return aliases;
    }

    /**
     * Initialize the crosswalk mapping table from the properties file named by
     * the DSpace configuration property "crosswalk.agris.properties.X", where
     * "X" is this instance's alias. Each instance may be configured with a
     * separate mapping table.
     *
     * Each mapping line has the format:
     *
     *   {qualified-DC-field} = {XML-fragment} | {grouping-limit}
     *
     * 1. qualified DC field name is {MDschema}.{element}.{qualifier},
     *    e.g. dc.contributor.author
     * 2. XML fragment is the prototype of the metadata element, with "%s"
     *    (value), "%a" (authority) and "%l" (language) placeholders.
     * 3. the optional part after " | " is stored as a grouping limit for
     *    the merge step in disseminateElement().
     *
     * @throws CrosswalkInternalException if the file is missing or unreadable
     */
    private void initMap()
        throws CrosswalkInternalException
    {
        // Already initialized — the map is built at most once per instance.
        if ((agrisMap != null) && (agrisMap.size() > 0))
        {
            return;
        }
        String myAlias = getPluginInstanceName();
        if (myAlias == null)
        {
            log.error("Must use PluginManager to instantiate AGRISDisseminationCrosswalk so the class knows its name.");
            return;
        }
        String cmPropName = CONFIG_PREFIX + myAlias;
        String propsFilename = ConfigurationManager.getProperty(cmPropName);
        if (propsFilename == null)
        {
            String msg = "ARGIS crosswalk missing " + "configuration file for crosswalk named \"" + myAlias + "\"";
            log.error(msg);
            throw new CrosswalkInternalException(msg);
        }
        else
        {
            String parent = ConfigurationManager.getProperty("dspace.dir")
                + File.separator + "config" + File.separator;
            File propsFile = new File(parent, propsFilename);
            BufferedReader br = null;
            try
            {
                br = new BufferedReader(new FileReader(propsFile));
                agrisMap = new LinkedHashMap<String, String>();
                groupingLimits = new HashMap<String, String>();
                String line;
                while ((line = br.readLine()) != null)
                {
                    line = line.trim();
                    // Skip comments and blank lines.
                    if (!line.startsWith("#") && !line.equals(""))
                    {
                        String[] props = line.split("\\s+=\\s+");
                        if (props.length == 2)
                        {
                            String qdc = props[0].trim();
                            String val = props[1].trim();
                            // Split "{XML-fragment} | {grouping-limit}"; second part optional.
                            String pair[] = val.split("\\s+\\|\\s+", 2);
                            if (pair.length < 1)
                            {
                                log.warn("Illegal ARGIS mapping in " + propsFile.toString() + ", line = "
                                    + qdc + " = " + val);
                            }
                            else
                            {
                                agrisMap.put(qdc, pair[0]);
                                if (pair.length >= 2 && (!"".equals(pair[1])))
                                {
                                    groupingLimits.put(qdc, pair[1].trim());
                                }
                            }
                        }
                    }
                }
            }
            catch (Exception e)
            {
                log.error("Error opening or reading ARGIS properties file: " + propsFile.toString() + ": " + e.toString());
                throw new CrosswalkInternalException("ARGIS crosswalk cannot "
                    + "open config file: " + e.toString());
            }
            finally
            {
                // FIX: the reader was previously never closed (resource leak).
                if (br != null)
                {
                    try
                    {
                        br.close();
                    }
                    catch (IOException ioe)
                    {
                        log.warn("Could not close " + propsFile.toString() + ": " + ioe.toString());
                    }
                }
            }
        }
    }

    @Override
    public Namespace[] getNamespaces()
    {
        return namespaces;
    }

    @Override
    public String getSchemaLocation()
    {
        // NOTE(review): this is only the namespace URI, not the usual
        // "{namespace} {schema-URL}" pair expected in xsi:schemaLocation.
        return "http://purl.org/agmes/agrisap/schema";
    }

    @Override
    public boolean canDisseminate(DSpaceObject dso)
    {
        // Only Items can be expressed as AGRIS resources.
        return dso.getType() == Constants.ITEM;
    }

    @Override
    public boolean preferList()
    {
        return false;
    }

    /**
     * Expand each mapped metadata field into one parsed XML fragment per value.
     *
     * @param metadata item metadata grouped by qualified field name
     * @return map from field name to the list of fragments built for it,
     *         in mapping-file order
     */
    private Map<String, ArrayList<Element>> prepareTags(Map<String, ArrayList<DCValue>> metadata)
    {
        // Wrapper document that declares every namespace a template may use.
        final String prolog = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
            + "<ags:resources "
            + "xmlns:" + AGS_NS.getPrefix() + "=\"" + AGS_NS.getURI() + "\" "
            + "xmlns:" + DC_NS.getPrefix() + "=\"" + DC_NS.getURI() + "\" "
            + "xmlns:" + AGLS_NS.getPrefix() + "=\"" + AGLS_NS.getURI() + "\" "
            + "xmlns:" + DCTERMS_NS.getPrefix() + "=\"" + DCTERMS_NS.getURI() + "\">";
        final String postlog = "</ags:resources>";

        Map<String, ArrayList<Element>> result = new LinkedHashMap<String, ArrayList<Element>>();
        for (String field : agrisMap.keySet())
        {
            if (!metadata.containsKey(field))
            {
                continue;
            }
            ArrayList<Element> elements = new ArrayList<Element>();
            for (DCValue dcv : metadata.get(field))
            {
                StringBuffer sb = new StringBuffer();
                sb.append(prolog);
                // %s/%a/%l placeholders take parts of the current value.
                String template = agrisMap.get(field);
                template = template.replace("%s", dcv.value != null ? dcv.value : "");
                template = template.replace("%a", dcv.authority != null ? dcv.authority : "");
                template = template.replace("%l", dcv.language != null ? dcv.language : "");
                template = template.replace("xml:lang=\"\"", "");
                // $field|x$ placeholders pull parts of OTHER fields (first value only).
                Matcher m = SUBST_PATTERN.matcher(template);
                while (m.find())
                {
                    if (m.groupCount() == 2)
                    {
                        DCValue tempDCV = metadata.get(m.group(1)) != null ? metadata.get(m.group(1)).get(0) : null;
                        if (tempDCV != null)
                        {
                            // FIX: reset per match so no stale substitution from a
                            // previous iteration can leak into this replacement.
                            String subst = "";
                            if ("s".equalsIgnoreCase(m.group(2)))
                            {
                                subst = tempDCV.value != null ? tempDCV.value : "";
                            }
                            else if ("a".equalsIgnoreCase(m.group(2)))
                            {
                                subst = tempDCV.authority != null ? tempDCV.authority : "";
                            }
                            else if ("l".equalsIgnoreCase(m.group(2)))
                            {
                                subst = tempDCV.language != null ? tempDCV.language : "";
                            }
                            m.appendReplacement(sb, subst);
                        }
                        else
                        {
                            m.appendReplacement(sb, "");
                        }
                    }
                }
                m.appendTail(sb);
                sb.append(postlog);
                try
                {
                    Element tempRoot = builder.build(new StringReader(sb.toString())).getRootElement();
                    elements.add(tempRoot);
                }
                catch (Exception e)
                {
                    log.error("AGRISDisseminationCrosswalk error: " + e.getLocalizedMessage());
                }
            }
            result.put(field, elements);
        }
        return result;
    }

    @Override
    public List<Element> disseminateList(DSpaceObject dso) throws CrosswalkException, IOException, SQLException, AuthorizeException
    {
        throw new UnsupportedOperationException("AGRIS dissemination as list of resources tags not applicable.");
    }

    /**
     * Disseminate an Item as a single {@code <ags:resources>} element holding
     * one {@code <ags:resource>} carrying an ARN attribute and the item's
     * metadata mapped through the configured templates.
     *
     * @param dso the object to disseminate; must be an Item
     * @return the resources root element (returned even if merging failed;
     *         failures are logged)
     * @throws CrosswalkObjectNotSupported if dso is not an Item
     */
    @Override
    public Element disseminateElement(DSpaceObject dso) throws CrosswalkException, IOException, SQLException, AuthorizeException
    {
        // FIX: explicit type check instead of an unchecked ClassCastException.
        if (dso.getType() != Constants.ITEM)
        {
            throw new CrosswalkObjectNotSupported("AGRISDisseminationCrosswalk can only crosswalk an Item.");
        }
        Item item = (Item) dso;
        initMap();

        Element root = new Element("resources", AGS_NS);
        root.addNamespaceDeclaration(DC_NS);
        root.addNamespaceDeclaration(DCTERMS_NS);
        root.addNamespaceDeclaration(AGLS_NS);
        root.addNamespaceDeclaration(AGS_NS);
        Element resource = new Element("resource", AGS_NS);

        // Use the ARN supplied by the submitter if present; otherwise build one
        // from country code + year of issue + institute code + zero-padded handle suffix.
        String arn = "", year = "";
        DCValue[] dc = item.getMetadata("dc", "identifier", "arn", Item.ANY);
        // FIX: the original condition was missing the '!' — it accepted the
        // user-supplied ARN only when it was the EMPTY string, so a supplied
        // ARN was always ignored and one was regenerated instead.
        if ((dc != null) && (dc.length > 0) && (dc[0].value != null) && (!"".equals(dc[0].value)))
        {
            arn = dc[0].value;
        }
        else
        {
            dc = item.getMetadata("dc", "date", "issued", Item.ANY);
            if (dc.length > 0)
            {
                year = dc[0].value.split("-")[0];
            }
            // NOTE(review): assumes the item already has a handle of the form
            // "prefix/suffix" — TODO confirm for in-progress submissions.
            arn = ARNcountry + year + ARNinstitute + String.format("%5s", ((item.getHandle().split("/").length == 2) ? item.getHandle().split("/")[1] : "")).replace(' ', '0');
        }
        if (!"".equals(arn))
        {
            resource.getAttributes().add(new Attribute("ARN", arn, AGS_NS));
        }

        // Group all metadata values by qualified field name: schema.element[.qualifier].
        HashMap<String, ArrayList<DCValue>> itemDCVs = new HashMap<String, ArrayList<DCValue>>();
        DCValue[] dcvs = item.getMetadata(Item.ANY, Item.ANY, Item.ANY, Item.ANY);
        for (int i = 0; i < dcvs.length; i++)
        {
            String qdc = dcvs[i].schema + "." + dcvs[i].element;
            if (dcvs[i].qualifier != null)
            {
                qdc += "." + dcvs[i].qualifier;
            }
            if (!itemDCVs.containsKey(qdc))
            {
                ArrayList<DCValue> al = new ArrayList<DCValue>();
                al.add(dcvs[i]);
                itemDCVs.put(qdc, al);
            }
            else
            {
                itemDCVs.get(qdc).add(dcvs[i]);
            }
        }

        // Render each field's values to fragments and merge them into the
        // single <ags:resource> element.
        Map<String, ArrayList<Element>> tags = prepareTags(itemDCVs);
        try
        {
            for (Entry<String, ArrayList<Element>> kvp : tags.entrySet())
            {
                String curKey = kvp.getKey();
                String field = groupingLimits.get(curKey);
                for (Element e : kvp.getValue())
                {
                    List children = e.getChildren();
                    if (children != null && children.size() > 0)
                    {
                        utilsXML.mergeXMLTrees(resource, (Element) children.get(0), field);
                    }
                }
            }
            root.addContent(resource);
        }
        catch (Exception e)
        {
            log.error(getPluginInstanceName() + ": " + e.getLocalizedMessage());
        }
        // FIX: moved out of a finally block — "return" inside finally silently
        // discarded any Throwable not caught above.
        return root;
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.content.crosswalk;
import java.io.IOException;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.lang.ArrayUtils;
import org.dspace.authorize.AuthorizeException;
import org.dspace.content.DCValue;
import org.dspace.content.DSpaceObject;
import org.dspace.content.Item;
import org.dspace.core.Constants;
import org.jdom.Element;
import org.jdom.Namespace;
/**
* DIM dissemination crosswalk
* <p>
* Produces the metadata encoded in DSpace Intermediate Format, without the overhead of XSLT processing.
*
* @author Alexey Maslov
* @version $Revision: 1 $
*/
public class DIMDisseminationCrosswalk
    implements DisseminationCrosswalk
{
    // Placeholder schema URL -- DIM has no published XSD.
    public static final String DIM_XSD = "null";

    /** Namespace of the DSpace Intermediate Metadata (DIM) format. */
    public static final Namespace DIM_NS =
        Namespace.getNamespace("dim", "http://www.dspace.org/xmlns/dspace/dim");

    private static final Namespace namespaces[] = { DIM_NS };

    /** @return a defensive copy of the namespaces used by this crosswalk */
    public Namespace[] getNamespaces()
    {
        return (Namespace[]) ArrayUtils.clone(namespaces);
    }

    /** @return the schemaLocation value (DIM has no real schema document) */
    public String getSchemaLocation()
    {
        return DIM_NS.getURI() + " " + DIM_XSD;
    }

    /**
     * Render every metadata value of an Item as a DIM {@code <field>} element,
     * carrying mdschema/element and optional qualifier, lang and authority
     * attributes plus the value as text.
     *
     * @param dso the object to disseminate; must be an Item
     * @return a new dim element with one field child per metadata value
     * @throws CrosswalkObjectNotSupported if dso is not an Item
     */
    public Element disseminateElement(DSpaceObject dso) throws CrosswalkException, IOException, SQLException, AuthorizeException
    {
        if (dso.getType() != Constants.ITEM)
        {
            throw new CrosswalkObjectNotSupported("DIMDisseminationCrosswalk can only crosswalk an Item.");
        }
        Item item = (Item) dso;

        Element dim = new Element("dim", DIM_NS);
        for (DCValue dcv : item.getMetadata(Item.ANY, Item.ANY, Item.ANY, Item.ANY))
        {
            Element field = new Element("field", DIM_NS);
            field.setAttribute("mdschema", dcv.schema);
            field.setAttribute("element", dcv.element);
            if (dcv.qualifier != null)
            {
                field.setAttribute("qualifier", dcv.qualifier);
            }
            if (dcv.language != null)
            {
                field.setAttribute("lang", dcv.language);
            }
            if (dcv.value != null)
            {
                field.setText(dcv.value);
            }
            if (dcv.authority != null)
            {
                field.setAttribute("authority", dcv.authority);
            }
            dim.addContent(field);
        }
        return dim;
    }

    /** Wrap the single disseminated element in a one-item list. */
    public List<Element> disseminateList(DSpaceObject dso) throws CrosswalkException, IOException, SQLException, AuthorizeException
    {
        List<Element> singleton = new ArrayList<Element>(1);
        singleton.add(disseminateElement(dso));
        return singleton;
    }

    /* Only interested in disseminating items at this time */
    public boolean canDisseminate(DSpaceObject dso)
    {
        return dso.getType() == Constants.ITEM;
    }

    /** A single element is preferred over a list. */
    public boolean preferList()
    {
        return false;
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.content.crosswalk;
/**
 * Single-method conversion hook used by configurable crosswalks to transform
 * one metadata string into an alternative representation.
 */
public interface IConverter
{
    /**
     * Get an alternative format for the input string. Useful examples are
     * conversion from a metadata language value in ISO-639-3 to ISO-639-1, etc.
     *
     * @param value
     *            the input string to convert
     * @return the converted string returned by the "conversion algorithm"
     */
    public String makeConversion(String value);
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.content.crosswalk;
import org.dspace.content.packager.PackageParameters;
/**
* Packager Wrapping Crosswalk plugin
* <p>
* A Packager Wrapping Crosswalk is a crosswalk which works with or wraps
* an existing Packager for all or some of its crosswalking functionality.
* <p>
* Crosswalks which utilize Packagers for ingestion/dissemination do not always
* have enough information to call a Packager (org.dspace.content.packager.*)
* with the proper parameters. This abstract class allows the crosswalk to
* store its own PackageParameters and deposit license, which can then be
* used by the Crosswalk to call a PackageIngester or PackagerDisseminator
* with all the proper parameters.
*
* @author Tim Donohue
* @version $Revision: 3761 $
* @see IngestionCrosswalk
* @see DisseminationCrosswalk
* @see org.dspace.content.packager.PackageIngester
* @see org.dspace.content.packager.PackageDisseminator
* @see org.dspace.content.packager.PackageParameters
*/
public abstract class AbstractPackagerWrappingCrosswalk
{
    // Parameters handed to a Packager plugin when this crosswalk delegates
    // ingestion/dissemination work to it; null until a caller supplies them.
    private PackageParameters wrappedPackagerParams = null;

    // Full text of the deposit license used when calling a PackageIngester;
    // null until a caller supplies it.
    private String wrappedIngestionLicense = null;

    /**
     * Set custom packaging parameters for this Crosswalk.
     * <p>
     * The crosswalk may later pass these on to a Packager plugin when
     * performing the actual ingestion or dissemination; how (or whether)
     * they are honored is up to the concrete crosswalk.
     *
     * @param pparams PackageParameters to make available to the Crosswalk
     */
    public void setPackagingParameters(PackageParameters pparams)
    {
        this.wrappedPackagerParams = pparams;
    }

    /**
     * Get the custom packaging parameters previously stored for this Crosswalk.
     *
     * @return PackageParameters previously made available to the Crosswalk or null
     */
    public PackageParameters getPackagingParameters()
    {
        return this.wrappedPackagerParams;
    }

    /**
     * Set a custom ingestion license for this Crosswalk, to be used when it
     * calls a PackageIngester.
     *
     * @param license the full text of the ingestion license
     * @see org.dspace.content.packager.PackageIngester
     */
    public void setIngestionLicense(String license)
    {
        this.wrappedIngestionLicense = license;
    }

    /**
     * Get the custom ingestion license previously stored for this Crosswalk.
     *
     * @return the full text of the ingestion license as a String, or null
     * @see org.dspace.content.packager.PackageIngester
     */
    public String getIngestionLicense()
    {
        return this.wrappedIngestionLicense;
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.content.crosswalk;
import java.io.IOException;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;
import org.apache.commons.lang.ArrayUtils;
import org.apache.log4j.Logger;
import org.dspace.authorize.AuthorizeException;
import org.dspace.content.DCValue;
import org.dspace.content.DSpaceObject;
import org.dspace.content.Collection;
import org.dspace.content.Community;
import org.dspace.content.Item;
import org.dspace.content.Site;
import org.dspace.content.authority.Choices;
import org.dspace.core.ConfigurationManager;
import org.dspace.core.Constants;
import org.jdom.Document;
import org.jdom.Element;
import org.jdom.Namespace;
import org.jdom.Verifier;
import org.jdom.transform.XSLTransformException;
import org.jdom.transform.XSLTransformer;
/**
* Configurable XSLT-driven dissemination Crosswalk
* <p>
* See the XSLTCrosswalk superclass for details on configuration.
* <p>
* <h3>Additional Configuration of Dissemination crosswalk:</h3>
* The disseminator also needs to be configured with an XML Namespace
* (including prefix and URI) and an XML Schema for output format. This
* is configured on additional properties in the DSpace Configuration, i.e.:
* <pre>
* crosswalk.dissemination.<i>PluginName</i>.namespace.<i>Prefix</i> = <i>namespace-URI</i>
* crosswalk.dissemination.<i>PluginName</i>.schemaLocation = <i>schemaLocation value</i>
* crosswalk.dissemination.<i>PluginName</i>.preferList = <i>boolean</i> (default is false)
* </pre>
* For example:
* <pre>
* crosswalk.dissemination.qdc.namespace.dc = http://purl.org/dc/elements/1.1/
* crosswalk.dissemination.qdc.namespace.dcterms = http://purl.org/dc/terms/
* crosswalk.dissemination.qdc.schemaLocation = \
* http://purl.org/dc/elements/1.1/ http://dublincore.org/schemas/xmls/qdc/2003/04/02/qualifieddc.xsd
* crosswalk.dissemination.qdc.preferList = true
* </pre>
*
* @author Larry Stone
* @author Scott Phillips
* @version $Revision: 5844 $
* @see XSLTCrosswalk
*/
public class XSLTDisseminationCrosswalk
extends XSLTCrosswalk
implements DisseminationCrosswalk
{
    /** log4j category */
    private static Logger log = Logger.getLogger(XSLTDisseminationCrosswalk.class);
    /** Configuration-key direction component selecting "dissemination" stylesheets. */
    private static final String DIRECTION = "dissemination";
    /** Plugin aliases harvested from configuration (built by the superclass). */
    private static String aliases[] = makeAliases(DIRECTION);
    /** @return a defensive copy of the configured plugin alias names */
    public static String[] getPluginNames()
    {
        return (String[]) ArrayUtils.clone(aliases);
    }
    // Namespace and schema are configuration-driven; they are initialized
    // lazily by init(), which is guarded by the null checks it performs.
    private String schemaLocation = null;
    private Namespace namespaces[] = null;
    private boolean preferList = false;
    /**
     * Load this instance's namespaces, schemaLocation and preferList flag
     * from DSpace configuration. Idempotent: returns immediately once
     * either namespaces or schemaLocation has been set.
     *
     * @throws CrosswalkInternalException if the plugin was not instantiated
     *         through the PluginManager (so its alias name is unknown)
     */
    private void init()
        throws CrosswalkInternalException
    {
        // Already initialized on a previous call.
        if (namespaces != null || schemaLocation != null)
        {
            return;
        }
        String myAlias = getPluginInstanceName();
        if (myAlias == null)
        {
            log.error("Must use PluginManager to instantiate XSLTDisseminationCrosswalk so the class knows its name.");
            throw new CrosswalkInternalException("Must use PluginManager to instantiate XSLTDisseminationCrosswalk so the class knows its name.");
        }
        // all configs for this plugin instance start with this:
        String prefix = CONFIG_PREFIX+DIRECTION+"."+myAlias+".";
        // get the schema location string, should already be in the
        // right format for value of "schemaLocation" attribute.
        schemaLocation = ConfigurationManager.getProperty(prefix+"schemaLocation");
        if (schemaLocation == null)
        {
            log.warn("No schemaLocation for crosswalk="+myAlias+", key="+prefix+"schemaLocation");
        }
        // sanity check: schemaLocation should have space.
        else if (schemaLocation.length() > 0 && schemaLocation.indexOf(' ') < 0)
        {
            log.warn("Possible INVALID schemaLocation (no space found) for crosswalk="+
                myAlias+", key="+prefix+"schemaLocation"+
                "\n\tCorrect format is \"{namespace} {schema-URL}\"");
        }
        // grovel for namespaces of the form:
        // crosswalk.diss.{PLUGIN_NAME}.namespace.{PREFIX} = {URI}
        String nsPrefix = prefix + "namespace.";
        Enumeration<String> pe = (Enumeration<String>)ConfigurationManager.propertyNames();
        List<Namespace> nsList = new ArrayList<Namespace>();
        while (pe.hasMoreElements())
        {
            String key = pe.nextElement();
            if (key.startsWith(nsPrefix))
            {
                nsList.add(Namespace.getNamespace(key.substring(nsPrefix.length()),
                    ConfigurationManager.getProperty(key)));
            }
        }
        namespaces = nsList.toArray(new Namespace[nsList.size()]);
        preferList = ConfigurationManager.getBooleanProperty(prefix+"preferList", false);
    }
/**
* Return the namespace used by this crosswalk.
*
* @see DisseminationCrosswalk
*/
public Namespace[] getNamespaces()
{
try
{
init();
}
catch (CrosswalkInternalException e)
{
log.error(e.toString());
}
return (Namespace[]) ArrayUtils.clone(namespaces);
}
/**
* Return the schema location used by this crosswalk.
*
* @see DisseminationCrosswalk
*/
public String getSchemaLocation()
{
try
{
init();
}
catch (CrosswalkInternalException e)
{
log.error(e.toString());
}
return schemaLocation;
}
    /**
     * Disseminate the DSpace item, collection, or community as a single
     * element: build a DIM expression of the object's metadata and run it
     * through the configured XSLT stylesheet.
     *
     * @param dso the object to disseminate; must be an Item, Collection or Community
     * @return the detached root element of the transformed document
     * @throws CrosswalkObjectNotSupported for any other object type
     * @throws CrosswalkInternalException if the stylesheet failed to load or run
     * @see DisseminationCrosswalk
     */
    public Element disseminateElement(DSpaceObject dso)
        throws CrosswalkException,
               IOException, SQLException, AuthorizeException
    {
        int type = dso.getType();
        if (!(type == Constants.ITEM ||
              type == Constants.COLLECTION ||
              type == Constants.COMMUNITY))
        {
            throw new CrosswalkObjectNotSupported("XSLTDisseminationCrosswalk can only crosswalk items, collections, and communities.");
        }
        init();
        XSLTransformer xform = getTransformer(DIRECTION);
        if (xform == null)
        {
            throw new CrosswalkInternalException("Failed to initialize transformer, probably error loading stylesheet.");
        }
        try
        {
            // Transform the DIM intermediate form and hand back its root,
            // detached so the caller may re-parent it.
            Document ddim = new Document(createDIM(dso));
            Document result = xform.transform(ddim);
            Element root = result.getRootElement();
            root.detach();
            return root;
        }
        catch (XSLTransformException e)
        {
            log.error("Got error: "+e.toString());
            throw new CrosswalkInternalException("XSL translation failed: "+e.toString(), e);
        }
    }
/**
 * Disseminate a DSpace item, collection, or community as a list of XML
 * elements: the children of the DIM representation are run through the
 * configured XSL stylesheet.
 *
 * @param dso object to disseminate; must be an Item, Collection, or Community.
 * @return list of transformed XML elements.
 * @throws CrosswalkObjectNotSupported for any other object type.
 * @throws CrosswalkInternalException if the stylesheet cannot be loaded
 *         or the transformation fails.
 * @see DisseminationCrosswalk
 */
public List<Element> disseminateList(DSpaceObject dso)
    throws CrosswalkException,
    IOException, SQLException, AuthorizeException
{
    int type = dso.getType();
    if (!(type == Constants.ITEM ||
          type == Constants.COLLECTION ||
          type == Constants.COMMUNITY))
    {
        // FIX: message previously read "can only crosswalk a items, ..."
        throw new CrosswalkObjectNotSupported("XSLTDisseminationCrosswalk can only crosswalk items, collections, and communities.");
    }
    init();

    XSLTransformer xform = getTransformer(DIRECTION);
    if (xform == null)
    {
        throw new CrosswalkInternalException("Failed to initialize transformer, probably error loading stylesheet.");
    }

    try
    {
        // Transform the DIM field elements directly (no document wrapper),
        // yielding a list rather than a single root element.
        return xform.transform(createDIM(dso).getChildren());
    }
    catch (XSLTransformException e)
    {
        log.error("Got error: "+e.toString());
        throw new CrosswalkInternalException("XSL translation failed: "+e.toString(), e);
    }
}
/**
 * Determine whether this crosswalk can disseminate the given object.
 *
 * @see DisseminationCrosswalk
 * @param dso candidate object.
 * @return true only for Items.
 */
public boolean canDisseminate(DSpaceObject dso)
{
    // NOTE(review): disseminateElement()/disseminateList() also accept
    // collections and communities, but this predicate only advertises
    // items — confirm whether the asymmetry is intentional (e.g. for
    // OAI-PMH harvesting) before widening it.
    return Constants.ITEM == dso.getType();
}
/**
 * Predicate: does this crosswalk prefer the list form over a single
 * root element?  Controlled by the crosswalk's "preferList"
 * configuration property (default false).
 *
 * @see DisseminationCrosswalk
 */
public boolean preferList()
{
    try
    {
        init();
    }
    catch (CrosswalkInternalException ex)
    {
        // Cannot propagate: interface method declares no checked exceptions.
        log.error(ex.toString());
    }
    return preferList;
}
/**
 * Generate a DIM (DSpace Intermediate Metadata) representation of a
 * DSpace object from an explicit set of metadata values.
 *
 * @param dso  the DSpace object whose type tags the DIM root.
 * @param dcvs metadata values to render as DIM field elements.
 * @return a new "dim" element containing one "field" child per value.
 */
public static Element createDIM(DSpaceObject dso, DCValue[] dcvs)
{
    Element dim = new Element("dim", DIM_NS);
    dim.setAttribute("dspaceType", Constants.typeText[dso.getType()]);
    for (DCValue dcv : dcvs)
    {
        dim.addContent(createField(dcv.schema, dcv.element, dcv.qualifier,
                dcv.language, dcv.value, dcv.authority, dcv.confidence));
    }
    return dim;
}
/**
 * Generate a DIM representation of a DSpace object.  Items use their
 * full metadata; collections, communities, and the site map their
 * database attributes onto Dublin Core fields.  Bitstreams currently
 * produce an empty DIM (see FIXME below).
 *
 * @param dso the DSpace object to build a representation of.
 * @return a new "dim" element describing the object.
 */
public static Element createDIM(DSpaceObject dso)
{
    if (dso.getType() == Constants.ITEM)
    {
        Item item = (Item) dso;
        return createDIM(dso, item.getMetadata(Item.ANY, Item.ANY, Item.ANY, Item.ANY));
    }

    Element dim = new Element("dim", DIM_NS);
    dim.setAttribute("dspaceType", Constants.typeText[dso.getType()]);
    switch (dso.getType())
    {
        case Constants.COLLECTION:
            addCollectionDIM((Collection) dso, dim);
            break;
        case Constants.COMMUNITY:
            addCommunityDIM((Community) dso, dim);
            break;
        case Constants.SITE:
            addSiteDIM((Site) dso, dim);
            break;
        default:
            // XXX FIXME: Nothing to crosswalk for bitstream?
            break;
    }
    return dim;
}

/** Map a Collection's descriptive attributes onto DC fields in the DIM. */
private static void addCollectionDIM(Collection collection, Element dim)
{
    String description = collection.getMetadata("introductory_text");
    String description_abstract = collection.getMetadata("short_description");
    String description_table = collection.getMetadata("side_bar_text");
    String identifier_uri = "hdl:" + collection.getHandle();
    String provenance = collection.getMetadata("provenance_description");
    String rights = collection.getMetadata("copyright_text");
    String rights_license = collection.getMetadata("license");
    String title = collection.getMetadata("name");

    dim.addContent(createField("dc","description",null,null,description));
    dim.addContent(createField("dc","description","abstract",null,description_abstract));
    dim.addContent(createField("dc","description","tableofcontents",null,description_table));
    dim.addContent(createField("dc","identifier","uri",null,identifier_uri));
    dim.addContent(createField("dc","provenance",null,null,provenance));
    dim.addContent(createField("dc","rights",null,null,rights));
    dim.addContent(createField("dc","rights","license",null,rights_license));
    dim.addContent(createField("dc","title",null,null,title));
}

/** Map a Community's descriptive attributes onto DC fields in the DIM. */
private static void addCommunityDIM(Community community, Element dim)
{
    String description = community.getMetadata("introductory_text");
    String description_abstract = community.getMetadata("short_description");
    String description_table = community.getMetadata("side_bar_text");
    String identifier_uri = "hdl:" + community.getHandle();
    String rights = community.getMetadata("copyright_text");
    String title = community.getMetadata("name");

    dim.addContent(createField("dc","description",null,null,description));
    dim.addContent(createField("dc","description","abstract",null,description_abstract));
    dim.addContent(createField("dc","description","tableofcontents",null,description_table));
    dim.addContent(createField("dc","identifier","uri",null,identifier_uri));
    dim.addContent(createField("dc","rights",null,null,rights));
    dim.addContent(createField("dc","title",null,null,title));
}

/** Map the Site object's attributes onto DC fields in the DIM. */
private static void addSiteDIM(Site site, Element dim)
{
    String identifier_uri = "hdl:" + site.getHandle();
    String title = site.getName();
    String url = site.getURL();

    //FIXME: adding two URIs for now (site handle and URL), in case site isn't using handles
    dim.addContent(createField("dc","identifier","uri",null,identifier_uri));
    dim.addContent(createField("dc","identifier","uri",null,url));
    dim.addContent(createField("dc","title",null,null,title));
}
/**
 * Create a new DIM field element with the given attributes and no
 * authority-control information (authority defaults to null and
 * confidence to -1, so no authority attributes are emitted).
 *
 * @param schema The metadata schema the DIM field belongs to.
 * @param element The element name of the DIM field.
 * @param qualifier The qualifier of the DIM field (may be null).
 * @param language The language of the DIM field (may be null).
 * @param value The value of the DIM field.
 * @return A new DIM field element
 */
private static Element createField(String schema, String element, String qualifier, String language, String value)
{
    return createField(schema, element, qualifier, language, value, null, -1);
}
/**
 * Create a new DIM field element with the given attributes.  Optional
 * attributes (qualifier, language, authority) are omitted when null;
 * the text value is filtered of characters illegal in XML.
 *
 * @param schema The metadata schema the DIM field belongs to.
 * @param element The element name of the DIM field.
 * @param qualifier The qualifier of the DIM field (may be null).
 * @param language The language of the DIM field (may be null).
 * @param value The value of the DIM field.
 * @param authority The authority key (may be null).
 * @param confidence confidence in the authority; only emitted when an
 *        authority is present.
 * @return A new DIM field element
 */
private static Element createField(String schema, String element, String qualifier, String language, String value,
                                   String authority, int confidence)
{
    Element result = new Element("field", DIM_NS);
    result.setAttribute("mdschema", schema);
    result.setAttribute("element", element);
    if (qualifier != null)
    {
        result.setAttribute("qualifier", qualifier);
    }
    if (language != null)
    {
        result.setAttribute("lang", language);
    }
    // Strip characters that would make the serialized XML ill-formed.
    result.setText(checkedString(value));
    if (authority != null)
    {
        result.setAttribute("authority", authority);
        result.setAttribute("confidence", Choices.getConfidenceText(confidence));
    }
    return result;
}
/**
 * Return the string with non-XML characters (i.e. low control chars)
 * excised, so the value can be serialized as legal XML character data.
 * Uses JDOM's Verifier to decide validity.
 *
 * @param value candidate text, may be null.
 * @return the original string when already legal, null for null input,
 *         otherwise a filtered copy.
 */
private static String checkedString(String value)
{
    if (value == null)
    {
        return null;
    }
    String reason = Verifier.checkCharacterData(value);
    if (reason == null)
    {
        // Fast path: the string is already legal XML character data.
        return value;
    }

    if (log.isDebugEnabled())
    {
        log.debug("Filtering out non-XML characters in string, reason=" + reason);
    }
    // StringBuilder (not StringBuffer): local buffer, no synchronization needed.
    StringBuilder result = new StringBuilder(value.length());
    for (int i = 0; i < value.length(); ++i)
    {
        char c = value.charAt(i);
        if (Verifier.isXMLCharacter((int) c))
        {
            result.append(c);
        }
    }
    return result.toString();
}
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.content.crosswalk;
import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import java.io.IOException;
import java.sql.SQLException;
import org.apache.log4j.Logger;
import org.dspace.authorize.AuthorizeException;
import org.dspace.content.DSpaceObject;
import org.dspace.content.Item;
import org.dspace.content.packager.PackageUtils;
import org.dspace.core.Constants;
import org.dspace.core.Context;
import org.dspace.core.Utils;
/**
* Accept a DSpace deposit license.
* <p>
* Note that this is NOT needed when ingesting a DSpace AIP, since the
* deposit license is stored as a Bitstream (or two) in a dedicated Bundle;
* the normal apparatus of ingestig the AIP will restore that Bitstream
* with its proper name and thus the presence of the deposit license.
* <p>
* This crosswalk should only be used when ingesting other kinds of SIPs.
*
* @author Larry Stone
* @version $Revision: 1.0 $
*/
public class LicenseStreamIngestionCrosswalk
    implements StreamIngestionCrosswalk
{
    /** log4j logger */
    private static Logger log = Logger.getLogger(LicenseStreamIngestionCrosswalk.class);

    /**
     * Read the DSpace deposit license text from the stream and attach it
     * to the item via PackageUtils.  Objects that are not Items are
     * silently ignored, since only items carry a deposit license.
     *
     * @param context DSpace context.
     * @param dso target object; only Items are acted upon.
     * @param in stream containing the license text.
     * @param MIMEType declared MIME type of the stream (logged only).
     */
    public void ingest(Context context, DSpaceObject dso, InputStream in, String MIMEType)
        throws CrosswalkException, IOException, SQLException, AuthorizeException
    {
        // If the package includes a deposit license, add it to the item.
        if (dso.getType() == Constants.ITEM)
        {
            if (log.isDebugEnabled())
            {
                log.debug("Reading a DSpace Deposit license, MIMEtype=" + MIMEType);
            }

            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            Utils.copy(in, baos);

            // FIX: decode the license bytes explicitly as UTF-8 instead of
            // the platform default charset, so ingest does not vary with JVM
            // locale settings.  (Assumes license text is UTF-8 — TODO confirm
            // against the packager that produced the stream.)
            PackageUtils.addDepositLicense(context, baos.toString("UTF-8"),
                                           (Item) dso, null);
        }
    }

    public String getMIMEType()
    {
        return "text/plain";
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.content.crosswalk;
import java.io.IOException;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.commons.lang.ArrayUtils;
import org.dspace.authorize.AuthorizeException;
import org.dspace.content.Bitstream;
import org.dspace.content.Bundle;
import org.dspace.content.DCValue;
import org.dspace.content.DSpaceObject;
import org.dspace.content.Item;
import org.dspace.content.MetadataSchema;
import org.dspace.core.Constants;
import org.dspace.core.ConfigurationManager;
import org.dspace.core.Utils;
import org.jdom.Element;
import org.jdom.Namespace;
/**
* ORE dissemination crosswalk
* <p>
* Produces an Atom-encoded ORE aggregation of a DSpace item.
*
* @author Alexey Maslov
* @version $Revision: 1 $
*/
public class OREDisseminationCrosswalk
    implements DisseminationCrosswalk
{
    /* Schema for Atom only available in Relax NG format */
    public static final String ATOM_RNG = "http://tweety.lanl.gov/public/schemas/2008-06/atom-tron.sch";

    /* Namespaces used in the generated Atom/ORE resource map */
    public static final Namespace ATOM_NS =
        Namespace.getNamespace("atom", "http://www.w3.org/2005/Atom");
    private static final Namespace ORE_NS =
        Namespace.getNamespace("ore", "http://www.openarchives.org/ore/terms/");
    private static final Namespace ORE_ATOM =
        Namespace.getNamespace("oreatom", "http://www.openarchives.org/ore/atom/");
    private static final Namespace RDF_NS =
        Namespace.getNamespace("rdf", "http://www.w3.org/1999/02/22-rdf-syntax-ns#");
    private static final Namespace DCTERMS_NS =
        Namespace.getNamespace("dcterms", "http://purl.org/dc/terms/");
    private static final Namespace DS_NS =
        Namespace.getNamespace("ds","http://www.dspace.org/objectModel/");

    private static final Namespace namespaces[] = { ATOM_NS, ORE_NS, ORE_ATOM, RDF_NS, DCTERMS_NS, DS_NS };

    /** Return a defensive copy of the namespaces used by this crosswalk. */
    public Namespace[] getNamespaces()
    {
        return (Namespace[]) ArrayUtils.clone(namespaces);
    }

    /* There is (and currently can be) no XSD schema that validates Atom feeds, only RNG */
    public String getSchemaLocation()
    {
        return ATOM_NS.getURI() + " " + ATOM_RNG;
    }

    /**
     * Disseminate an Atom-encoded ORE ReM (resource map) mapped from a
     * DSpace Item: an atom:entry whose links aggregate the item's
     * bitstreams, with oreatom:triples carrying RDF metadata about the
     * item and each bitstream.
     *
     * @param item the item to map.
     * @return the atom:entry element for the aggregation.
     * @throws CrosswalkInternalException if no base URI can be resolved
     *         from configuration (see ore.authoritative.source).
     */
    private Element disseminateItem(Item item) throws CrosswalkException, IOException, SQLException, AuthorizeException
    {
        // Base URI of the authoritative resource-map source.  NOTE: despite
        // the name, oaiUrl holds dspace.url when the source is xmlui/manakin.
        String oaiUrl = null;
        String dsUrl = ConfigurationManager.getProperty("dspace.url");
        String remSource = ConfigurationManager.getProperty("ore.authoritative.source");
        if (remSource == null || remSource.equalsIgnoreCase("oai"))
        {
            oaiUrl = ConfigurationManager.getProperty("dspace.oai.url");
        }
        else if (remSource.equalsIgnoreCase("xmlui") || remSource.equalsIgnoreCase("manakin"))
        {
            oaiUrl = dsUrl;
        }

        if (oaiUrl == null)
        {
            throw new CrosswalkInternalException("Base uri for the ore generator has not been set. Check the ore.authoritative.source setting.");
        }

        // uriA: URI of the aggregation / resource map for this item.
        String uriA = oaiUrl + "/metadata/handle/" + item.getHandle() + "/ore.xml";

        // Top level atom feed element
        Element aggregation = new Element("entry",ATOM_NS);
        aggregation.addNamespaceDeclaration(ATOM_NS);
        aggregation.addNamespaceDeclaration(ORE_NS);
        aggregation.addNamespaceDeclaration(ORE_ATOM);
        aggregation.addNamespaceDeclaration(DCTERMS_NS);

        // Atom-entry specific info
        Element atomId = new Element("id",ATOM_NS);
        atomId.addContent(uriA);
        aggregation.addContent(atomId);

        // One alternate link per dc.identifier.uri value on the item.
        Element aggLink;
        DCValue[] uris = item.getMetadata(MetadataSchema.DC_SCHEMA,"identifier","uri",Item.ANY);
        for (DCValue uri : uris) {
            aggLink = new Element("link",ATOM_NS);
            aggLink.setAttribute("rel", "alternate");
            aggLink.setAttribute("href", uri.value);
            aggregation.addContent(aggLink);
        }

        // Information about the resource map, as separate entity from the aggregation it describes
        Element uriALink = new Element("link",ATOM_NS);
        uriALink.setAttribute("rel", "http://www.openarchives.org/ore/terms/describes");
        uriALink.setAttribute("href", uriA);

        Element uriRLink = new Element("link",ATOM_NS);
        uriRLink.setAttribute("rel","self");
        uriRLink.setAttribute("href", uriA + "#atom");
        uriRLink.setAttribute("type","application/atom+xml");

        // Published/updated are both stamped "now" — the map is generated
        // on demand rather than stored.
        Element remPublished = new Element("published",ATOM_NS);
        remPublished.addContent(Utils.formatISO8601Date(new Date()));

        Element remUpdated = new Element("updated",ATOM_NS);
        remUpdated.addContent(Utils.formatISO8601Date(new Date()));

        Element remCreator = new Element("source",ATOM_NS);
        Element remGenerator = new Element("generator",ATOM_NS);
        remGenerator.addContent(ConfigurationManager.getProperty("dspace.name"));
        remGenerator.setAttribute("uri", oaiUrl);
        remCreator.addContent(remGenerator);

        aggregation.addContent(uriALink);
        aggregation.addContent(uriRLink);
        aggregation.addContent(remPublished);
        aggregation.addContent(remUpdated);
        aggregation.addContent(remCreator);

        // Information about the aggregation (item) itself: first dc.title,
        // or empty string if the item has none (atom:title is mandatory).
        Element aggTitle = new Element("title",ATOM_NS);
        DCValue[] titles = item.getMetadata(MetadataSchema.DC_SCHEMA, "title", null, Item.ANY);
        if (titles != null && titles.length>0)
        {
            aggTitle.addContent(titles[0].value);
        }
        else
        {
            aggTitle.addContent("");
        }
        aggregation.addContent(aggTitle);

        // One atom:author per dc.contributor.author value.
        Element aggAuthor;
        Element aggAuthorName;
        DCValue[] authors = item.getMetadata(MetadataSchema.DC_SCHEMA,"contributor","author",Item.ANY);
        for (DCValue author : authors) {
            aggAuthor = new Element("author",ATOM_NS);
            aggAuthorName = new Element("name",ATOM_NS);
            aggAuthorName.addContent(author.value);
            aggAuthor.addContent(aggAuthorName);
            aggregation.addContent(aggAuthor);
        }

        // Category markers: ORE aggregation type, last-modified stamp, and
        // the DSpace object type.
        Element oreCategory = new Element("category",ATOM_NS);
        oreCategory.setAttribute("scheme", ORE_NS.getURI());
        oreCategory.setAttribute("term", ORE_NS.getURI()+"Aggregation");
        oreCategory.setAttribute("label","Aggregation");

        Element updateCategory = new Element("category",ATOM_NS);
        updateCategory.setAttribute("scheme", ORE_ATOM.getURI()+"modified");
        updateCategory.setAttribute("term", Utils.formatISO8601Date(item.getLastModified()));

        Element dsCategory = new Element("category",ATOM_NS);
        dsCategory.setAttribute("scheme", DS_NS.getURI());
        dsCategory.setAttribute("term", "DSpaceItem");
        dsCategory.setAttribute("label", "DSpace Item");

        aggregation.addContent(oreCategory);
        aggregation.addContent(updateCategory);
        aggregation.addContent(dsCategory);

        // metadata section
        Element arLink;
        Element rdfDescription, rdfType, dcModified, dcDesc;
        Element triples = new Element("triples", ORE_ATOM);

        // metadata about the item
        rdfDescription = new Element("Description", RDF_NS);
        rdfDescription.setAttribute("about", uriA, RDF_NS);
        rdfType = new Element("type", RDF_NS);
        rdfType.setAttribute("resource", DS_NS.getURI()+"DSpaceItem", RDF_NS);
        dcModified = new Element("modified", DCTERMS_NS);
        dcModified.addContent(Utils.formatISO8601Date(item.getLastModified()));
        rdfDescription.addContent(rdfType);
        rdfDescription.addContent(dcModified);
        triples.addContent(rdfDescription);

        // Add a link and an oreatom metadata entry for each bitstream in the item
        Bundle[] bundles = item.getBundles();
        Bitstream[] bitstreams;
        for (Bundle bundle : bundles)
        {
            // Omit the special "ORE" bitstream
            if (bundle.getName().equals("ORE"))
            {
                continue;
            }
            bitstreams = bundle.getBitstreams();
            for (Bitstream bs : bitstreams)
            {
                arLink = new Element("link",ATOM_NS);
                arLink.setAttribute("rel", ORE_NS.getURI()+"aggregates");
                arLink.setAttribute("href",dsUrl + "/bitstream/handle/" + item.getHandle() + "/" + encodeForURL(bs.getName()) + "?sequence=" + bs.getSequenceID());
                arLink.setAttribute("title",bs.getName());
                arLink.setAttribute("type",bs.getFormat().getMIMEType());
                arLink.setAttribute("length",Long.toString(bs.getSize()));
                aggregation.addContent(arLink);

                // metadata about the bitstream; the bundle name is used as
                // the dcterms:description of the resource.
                rdfDescription = new Element("Description", RDF_NS);
                rdfDescription.setAttribute("about", dsUrl + "/bitstream/handle/" + item.getHandle() + "/" + encodeForURL(bs.getName()) + "?sequence=" + bs.getSequenceID(), RDF_NS);
                rdfType = new Element("type", RDF_NS);
                rdfType.setAttribute("resource", DS_NS.getURI()+"DSpaceBitstream", RDF_NS);
                dcDesc = new Element("description", DCTERMS_NS);
                dcDesc.addContent(bundle.getName());
                rdfDescription.addContent(rdfType);
                rdfDescription.addContent(dcDesc);
                triples.addContent(rdfDescription);
            }
        }
        aggregation.addContent(triples);

        // Add a link to the OAI-PMH served metadata (oai_dc is always on)
        /*
        Element pmhMeta = new Element("entry",ATOM_NS);
        pUri = new Element("id",ATOM_NS);
        String oaiId = new String("oai:" + ConfigurationManager.getProperty("dspace.hostname") + ":" + item.getHandle());
        pUri.addContent(oaiId + "#oai_dc");
        pmhMeta.addContent(pUri);

        Element pmhAuthor = new Element("author",ATOM_NS);
        Element pmhAuthorName = new Element("name",ATOM_NS);
        Element pmhAuthorUri = new Element("uri",ATOM_NS);
        pmhAuthorName.addContent(ConfigurationManager.getProperty("dspace.name"));
        pmhAuthorUri.addContent(oaiUrl);
        pmhAuthor.addContent(pmhAuthorName);
        pmhAuthor.addContent(pmhAuthorUri);
        pmhMeta.addContent(pmhAuthor);

        arUri = new Element("link",ATOM_NS);
        arUri.setAttribute("rel","alternate");
        arUri.setAttribute("href",oaiUrl + "/request?verb=GetRecord&identifier=" + oaiId + "&metadataprefix=oai_dc");
        pmhMeta.addContent(arUri);

        Element rdfDesc = new Element("Description",RDF_NS);
        rdfDesc.setAttribute("about",oaiUrl + "/request?verb=GetRecord&identifier=" + oaiId + "&metadataprefix=oai_dc",RDF_NS);

        Element dcTerms = new Element("dcterms",DCTERMS_NS);
        dcTerms.setAttribute("resource","http://www.openarchives.org/OAI/2.0/oai_dc/",RDF_NS);

        rdfDesc.addContent(dcTerms);
        pmhMeta.addContent(rdfDesc);

        arUpdated = new Element("updated",ATOM_NS);
        arUpdated.addContent(Utils.formatISO8601Date(item.getLastModified()));
        pmhMeta.addContent(arUpdated);

        arTitle = new Element("title",ATOM_NS);
        arTitle.addContent("");
        pmhMeta.addContent(arTitle);

        aggregation.addContent(pmhMeta);*/

        return aggregation;
    }

    /**
     * Disseminate an object as an ORE resource map.  Only items produce
     * output.  NOTE(review): collections and communities fall through and
     * return null even though canDisseminate() claims support for them —
     * callers must be prepared for a null result; confirm this is intended.
     */
    public Element disseminateElement(DSpaceObject dso) throws CrosswalkException, IOException, SQLException, AuthorizeException
    {
        switch(dso.getType()) {
            case Constants.ITEM: return disseminateItem((Item)dso);
            case Constants.COLLECTION: break;
            case Constants.COMMUNITY: break;
            default: throw new CrosswalkObjectNotSupported("ORE implementation unable to disseminate unknown DSpace object.");
        }
        return null;
    }

    /**
     * Helper method to escape all characters that are not part of the canon set.
     *
     * NOTE(review): this percent-encodes the raw char value via
     * Integer.toHexString, which is unpadded lowercase and not UTF-8
     * byte-based — e.g. a tab becomes "%9" (one digit) and non-Latin-1
     * characters yield escapes longer than two hex digits, both invalid
     * per RFC 3986.  Consider java.net.URLEncoder — confirm impact on
     * previously generated URLs before changing.
     *
     * @param sourceString source unescaped string
     */
    private String encodeForURL(String sourceString) {
        Character lowalpha[] = {'a' , 'b' , 'c' , 'd' , 'e' , 'f' , 'g' , 'h' , 'i' ,
                'j' , 'k' , 'l' , 'm' , 'n' , 'o' , 'p' , 'q' , 'r' ,
                's' , 't' , 'u' , 'v' , 'w' , 'x' , 'y' , 'z'};

        Character upalpha[] = {'A' , 'B' , 'C' , 'D' , 'E' , 'F' , 'G' , 'H' , 'I' ,
                'J' , 'K' , 'L' , 'M' , 'N' , 'O' , 'P' , 'Q' , 'R' ,
                'S' , 'T' , 'U' , 'V' , 'W' , 'X' , 'Y' , 'Z'};

        Character digit[] = {'0' , '1' , '2' , '3' , '4' , '5' , '6' , '7' , '8' , '9'};

        Character mark[] = {'-' , '_' , '.' , '!' , '~' , '*' , '\'' , '(' , ')'};

        // reserved
        //Character reserved[] = {';' , '/' , '?' , ':' , '@' , '&' , '=' , '+' , '$' , ',' ,'%', '#'};

        Set<Character> URLcharsSet = new HashSet<Character>();
        URLcharsSet.addAll(Arrays.asList(lowalpha));
        URLcharsSet.addAll(Arrays.asList(upalpha));
        URLcharsSet.addAll(Arrays.asList(digit));
        URLcharsSet.addAll(Arrays.asList(mark));
        //URLcharsSet.addAll(Arrays.asList(reserved));

        StringBuilder processedString = new StringBuilder();
        for (int i=0; i<sourceString.length(); i++) {
            char ch = sourceString.charAt(i);
            if (URLcharsSet.contains(ch)) {
                processedString.append(ch);
            }
            else {
                processedString.append("%").append(Integer.toHexString((int)ch));
            }
        }

        return processedString.toString();
    }

    /** List form: a single-element list wrapping disseminateElement(). */
    public List<Element> disseminateList(DSpaceObject dso) throws CrosswalkException, IOException, SQLException, AuthorizeException
    {
        List<Element> result = new ArrayList<Element>(1);
        result.add(disseminateElement(dso));
        return result;
    }

    /* Advertises items, collections, and communities — but note that only
       items actually produce output in disseminateElement(). */
    public boolean canDisseminate(DSpaceObject dso)
    {
        return (dso.getType() == Constants.ITEM || dso.getType() == Constants.COLLECTION || dso.getType() == Constants.COMMUNITY);
    }

    public boolean preferList()
    {
        return false;
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.content.crosswalk;
import java.io.IOException;
import java.util.List;
import java.util.ArrayList;
import java.sql.SQLException;
import org.dspace.core.Constants;
import org.dspace.core.Context;
import org.dspace.core.ConfigurationManager;
import org.dspace.content.DCValue;
import org.dspace.content.Item;
import org.dspace.content.Bitstream;
import org.dspace.content.BitstreamFormat;
import org.dspace.content.Collection;
import org.dspace.content.Community;
import org.dspace.content.DSpaceObject;
import org.dspace.content.Site;
import org.dspace.content.packager.PackageUtils;
import org.dspace.eperson.EPerson;
import org.dspace.authorize.AuthorizeException;
import org.apache.log4j.Logger;
import org.dspace.content.packager.DSpaceAIPIngester;
import org.dspace.content.packager.METSManifest;
import org.dspace.handle.HandleManager;
import org.jdom.Element;
import org.jdom.Namespace;
/**
* Crosswalk of technical metadata for DSpace AIP. This is
* only intended for use by the METS AIP packager. It borrows the
* DIM XML format and DC field names, although it abuses the meaning
* of Dublin Core terms and qualifiers because this format is
* ONLY FOR DSPACE INTERNAL USE AND INGESTION. It is needed to record
* a complete and accurate image of all of the attributes an object
* has in the RDBMS.
*
* It encodes the following common properties of all archival objects:
*
* identifier.uri -- persistent identifier of object in URI form (e.g. Handle URN)
* relation.isPartOf -- persistent identifier of object's parent in URI form (e.g. Handle URN)
* relation.isReferencedBy -- if relevant, persistent identifier of other objects that map this one as a child. May repeat.
*
* There may also be other fields, depending on the type of object,
* which encode attributes that are not part of the descriptive metadata and
* are not adequately covered by other technical MD formats (i.e. PREMIS).
*
* Configuration entries:
* aip.ingest.createEperson -- boolean, create EPerson for Submitter
* automatically, on ingest, if it doesn't exist.
*
* @author Larry Stone
* @version $Revision: 1.2 $
*/
public class AIPTechMDCrosswalk
implements DisseminationCrosswalk, IngestionCrosswalk
{
/** log4j category */
private static Logger log = Logger.getLogger(AIPTechMDCrosswalk.class);
/**
 * Get XML namespaces of the elements this crosswalk may return.
 * This crosswalk always emits DSpace Intermediate Metadata (DIM).
 *
 * @return a one-element array holding the DIM namespace.
 */
@Override
public Namespace[] getNamespaces()
{
    return new Namespace[] { XSLTCrosswalk.DIM_NS };
}
/**
 * Get the XML Schema location(s) of the target metadata format.
 * Returns the string value of the <code>xsi:schemaLocation</code>
 * attribute that should be applied to the generated XML.
 * <p>
 * It may return the empty string if no schema is known, but crosswalk
 * authors are strongly encouraged to implement this call so their output
 * XML can be validated correctly.
 * @return SchemaLocation string, including URI namespace, followed by
 *  whitespace and URI of XML schema document, or empty string if unknown.
 */
@Override
public String getSchemaLocation()
{
    // DIM is a DSpace-internal format with no published XSD, so no
    // schemaLocation can be supplied.
    return "";
}
/**
 * Predicate: Can this disseminator crosswalk the given object.
 * Needed by OAI-PMH server implementation.
 *
 * @param dso dspace object, e.g. an <code>Item</code>.
 * @return true for Site, Community, Collection, Item, and Bitstream;
 *         false for anything else.
 */
@Override
public boolean canDisseminate(DSpaceObject dso)
{
    switch (dso.getType())
    {
        case Constants.SITE:
        case Constants.COMMUNITY:
        case Constants.COLLECTION:
        case Constants.ITEM:
        case Constants.BITSTREAM:
            return true;
        default:
            return false;
    }
}
/**
 * Predicate: Does this disseminator prefer to return a list of Elements,
 * rather than a single root Element?
 * <p>
 * Some metadata formats have an XML schema without a root element,
 * for example, the Dublin Core and Qualified Dublin Core formats.
 * This would be <code>true</code> for a crosswalk into QDC, since
 * it would "prefer" to return a list, since any root element it has
 * to produce would have to be part of a nonstandard schema. In
 * most cases your implementation will want to return
 * <code>false</code>
 *
 * @return true when disseminator prefers you call disseminateList().
 */
@Override
public boolean preferList()
{
    // DIM has a proper root element ("dim"), so the single-element form
    // is preferred.
    return false;
}
/**
 * Execute crosswalk, returning List of XML elements.
 * The list holds the DIM "field" children of the single root element
 * produced by {@link #disseminateElement}; when there are no results an
 * empty list is returned, never <code>null</code>.
 *
 * @param dso the DSpace Object whose metadata to export.
 * @return results of crosswalk as list of XML elements.
 *
 * @throws CrosswalkInternalException (<code>CrosswalkException</code>) failure of the crosswalk itself.
 * @throws CrosswalkObjectNotSupported (<code>CrosswalkException</code>) Cannot crosswalk this kind of DSpace object.
 * @throws IOException I/O failure in services this calls
 * @throws SQLException Database failure in services this calls
 * @throws AuthorizeException current user not authorized for this operation.
 */
@Override
public List<Element> disseminateList(DSpaceObject dso)
    throws CrosswalkException, IOException, SQLException,
           AuthorizeException
{
    // The DIM root's children are exactly the field elements callers want.
    return disseminateElement(dso).getChildren();
}
/**
 * Execute crosswalk, returning one XML root element as
 * a JDOM <code>Element</code> object: a DIM document recording the
 * object's technical attributes (persistent identifier, parent/mapped
 * containers, and type-specific properties).
 *
 * @param dso the DSpace Object whose metadata to export.
 * @return root Element of the target metadata, never <code>null</code>
 *
 * @throws CrosswalkInternalException (<code>CrosswalkException</code>) failure of the crosswalk itself.
 * @throws CrosswalkObjectNotSupported (<code>CrosswalkException</code>) Cannot crosswalk this kind of DSpace object.
 * @throws IOException I/O failure in services this calls
 * @throws SQLException Database failure in services this calls
 * @throws AuthorizeException current user not authorized for this operation.
 */
@Override
public Element disseminateElement(DSpaceObject dso)
    throws CrosswalkException, IOException, SQLException,
           AuthorizeException
{
    List<DCValue> dc = new ArrayList<DCValue>();
    if (dso.getType() == Constants.ITEM)
    {
        addItemTechMD((Item) dso, dc);
    }
    else if (dso.getType() == Constants.BITSTREAM)
    {
        addBitstreamTechMD((Bitstream) dso, dc);
    }
    else if (dso.getType() == Constants.COLLECTION)
    {
        addCollectionTechMD((Collection) dso, dc);
    }
    else if (dso.getType() == Constants.COMMUNITY)
    {
        addCommunityTechMD((Community) dso, dc);
    }
    else if (dso.getType() == Constants.SITE)
    {
        addSiteTechMD((Site) dso, dc);
    }
    return XSLTDisseminationCrosswalk.createDIM(dso,
            dc.toArray(new DCValue[dc.size()]));
}

/** Item technical MD: submitter, handle, owning/mapped collections, withdrawal flag. */
private static void addItemTechMD(Item item, List<DCValue> dc)
    throws SQLException
{
    EPerson is = item.getSubmitter();
    if (is != null)
    {
        dc.add(makeDC("creator", null, is.getEmail()));
    }
    dc.add(makeDC("identifier", "uri", "hdl:" + item.getHandle()));

    // FIX: guard against a null owning collection (e.g. item not yet
    // archived); previously this dereferenced it unconditionally and
    // could throw NullPointerException.
    Collection owningColl = item.getOwningCollection();
    if (owningColl != null)
    {
        String owner = owningColl.getHandle();
        if (owner != null)
        {
            dc.add(makeDC("relation", "isPartOf", "hdl:" + owner));
        }
    }

    // Every other collection the item is mapped into.
    Collection inColl[] = item.getCollections();
    for (int i = 0; i < inColl.length; ++i)
    {
        if (owningColl == null || inColl[i].getID() != owningColl.getID())
        {
            String h = inColl[i].getHandle();
            if (h != null)
            {
                dc.add(makeDC("relation", "isReferencedBy", "hdl:" + h));
            }
        }
    }
    if (item.isWithdrawn())
    {
        dc.add(makeDC("rights", "accessRights", "WITHDRAWN"));
    }
}

/** Bitstream technical MD: name, source, description, and format attributes. */
private static void addBitstreamTechMD(Bitstream bitstream, List<DCValue> dc)
    throws SQLException
{
    String bsName = bitstream.getName();
    if (bsName != null)
    {
        dc.add(makeDC("title", null, bsName));
    }
    String bsSource = bitstream.getSource();
    if (bsSource != null)
    {
        dc.add(makeDC("title", "alternative", bsSource));
    }
    String bsDesc = bitstream.getDescription();
    if (bsDesc != null)
    {
        dc.add(makeDC("description", null, bsDesc));
    }
    String bsUfmt = bitstream.getUserFormatDescription();
    if (bsUfmt != null)
    {
        dc.add(makeDC("format", null, bsUfmt));
    }
    // Format attributes are always recorded so ingest can reconstruct
    // (or recreate) the BitstreamFormat row exactly.
    BitstreamFormat bsf = bitstream.getFormat();
    dc.add(makeDC("format", "medium", bsf.getShortDescription()));
    dc.add(makeDC("format", "mimetype", bsf.getMIMEType()));
    dc.add(makeDC("format", "supportlevel", bsf.getSupportLevelText()));
    dc.add(makeDC("format", "internal", Boolean.toString(bsf.isInternal())));
}

/** Collection technical MD: handle plus owning and mapping communities. */
private static void addCollectionTechMD(Collection collection, List<DCValue> dc)
    throws SQLException
{
    dc.add(makeDC("identifier", "uri", "hdl:" + collection.getHandle()));

    // First community is treated as the owner; any further communities
    // are recorded as mapping relations.  (Assumes every archived
    // collection belongs to at least one community — TODO confirm.)
    Community owners[] = collection.getCommunities();
    String ownerHdl = owners[0].getHandle();
    if (ownerHdl != null)
    {
        dc.add(makeDC("relation", "isPartOf", "hdl:" + ownerHdl));
    }
    for (int i = 1; i < owners.length; ++i)
    {
        String h = owners[i].getHandle();
        if (h != null)
        {
            dc.add(makeDC("relation", "isReferencedBy", "hdl:" + h));
        }
    }
}

/** Community technical MD: handle plus parent community (or the Site). */
private static void addCommunityTechMD(Community community, List<DCValue> dc)
    throws SQLException
{
    dc.add(makeDC("identifier", "uri", "hdl:" + community.getHandle()));

    // Top-level communities are recorded as children of the Site object.
    Community owner = community.getParentCommunity();
    String ownerHdl = (owner == null) ? Site.getSiteHandle() : owner.getHandle();
    if (ownerHdl != null)
    {
        dc.add(makeDC("relation", "isPartOf", "hdl:" + ownerHdl));
    }
}

/** Site technical MD: identifiers only. */
private static void addSiteTechMD(Site site, List<DCValue> dc)
{
    //FIXME: adding two URIs for now (site handle and URL), in case site isn't using handles
    dc.add(makeDC("identifier", "uri", "hdl:" + site.getHandle()));
    dc.add(makeDC("identifier", "uri", site.getURL()));
}
/**
 * Build a Dublin Core DCValue for the AIP technical-metadata record.
 *
 * @param element DC element name.
 * @param qualifier DC qualifier, may be null.
 * @param value field value.
 * @return a new DCValue in the "dc" schema with no language.
 */
private static DCValue makeDC(String element, String qualifier, String value)
{
    DCValue dcv = new DCValue();
    dcv.schema = "dc";
    dcv.element = element;
    dcv.qualifier = qualifier;
    // Technical values are language-neutral.
    dcv.language = null;
    dcv.value = value;
    return dcv;
}
/**
 * Ingest a whole DIM document by delegating to the list form: the
 * root element's children are the DIM "field" elements to apply.
 * (No XSL transformation is involved in this crosswalk, despite what
 * sibling crosswalks do.)
 *
 * @param context DSpace context.
 * @param dso target object whose attributes are set from the fields.
 * @param root root element of the DIM document.
 */
@Override
public void ingest(Context context, DSpaceObject dso, Element root)
    throws CrosswalkException, IOException, SQLException, AuthorizeException
{
    ingest(context, dso, root.getChildren());
}
/**
 * Translate metadata with XSL stylesheet and ingest it.
 * Translation produces a list of DIM "field" elements;
 * these correspond directly to Item.addMetadata() calls so
 * they are simply executed.
 *
 * @param context current DSpace context
 * @param dso     Bitstream, Item, Community or Collection to apply metadata to
 * @param dimList list of DIM <field> elements (nested <dim> containers recurse)
 */
@Override
public void ingest(Context context, DSpaceObject dso, List<Element> dimList)
    throws CrosswalkException,
           IOException, SQLException, AuthorizeException
{
    int type = dso.getType();

    // accumulate values for bitstream format in case we have to make one
    String bsfShortName = null;
    String bsfMIMEType = null;
    int bsfSupport = BitstreamFormat.KNOWN;
    boolean bsfInternal = false;

    for (Element field : dimList)
    {
        // if we get <dim> in a list, recurse.
        if (field.getName().equals("dim") && field.getNamespace().equals(XSLTCrosswalk.DIM_NS))
        {
            ingest(context, dso, field.getChildren());
        }
        else if (field.getName().equals("field") && field.getNamespace().equals(XSLTCrosswalk.DIM_NS))
        {
            String schema = field.getAttributeValue("mdschema");
            // FIX: constant-first comparison so a <field> missing its
            // mdschema attribute (schema == null) is skipped with a warning
            // below instead of throwing a NullPointerException.
            if ("dc".equals(schema))
            {
                String dcField = field.getAttributeValue("element");
                String qualifier = field.getAttributeValue("qualifier");
                if (qualifier != null)
                {
                    dcField += "." + qualifier;
                }
                String value = field.getText();

                if (type == Constants.BITSTREAM)
                {
                    // map DC fields onto the Bitstream's descriptive properties
                    Bitstream bitstream = (Bitstream)dso;
                    if (dcField.equals("title"))
                    {
                        bitstream.setName(value);
                    }
                    else if (dcField.equals("title.alternative"))
                    {
                        bitstream.setSource(value);
                    }
                    else if (dcField.equals("description"))
                    {
                        bitstream.setDescription(value);
                    }
                    else if (dcField.equals("format"))
                    {
                        bitstream.setUserFormatDescription(value);
                    }
                    else if (dcField.equals("format.medium"))
                    {
                        bsfShortName = value;
                    }
                    else if (dcField.equals("format.mimetype"))
                    {
                        bsfMIMEType = value;
                    }
                    else if (dcField.equals("format.supportlevel"))
                    {
                        int sl = BitstreamFormat.getSupportLevelID(value);
                        if (sl < 0)
                        {
                            throw new MetadataValidationException("Got unrecognized value for bitstream support level: " + value);
                        }
                        else
                        {
                            bsfSupport = sl;
                        }
                    }
                    else if (dcField.equals("format.internal"))
                    {
                        bsfInternal = (Boolean.valueOf(value)).booleanValue();
                    }
                    else
                    {
                        log.warn("Got unrecognized DC field for Bitstream: " + dcField);
                    }
                }
                else if (type == Constants.ITEM)
                {
                    Item item = (Item)dso;

                    // item submitter
                    if (dcField.equals("creator"))
                    {
                        EPerson sub = EPerson.findByEmail(context, value);
                        // if eperson doesn't exist yet, optionally create it:
                        if (sub == null)
                        {
                            //This class works in conjunction with the DSpaceAIPIngester.
                            // so, we'll use the configuration settings for that ingester
                            String configName = new DSpaceAIPIngester().getConfigurationName();
                            //Create the EPerson if specified and person doesn't already exit
                            if (ConfigurationManager.getBooleanProperty(METSManifest.CONFIG_METS_PREFIX + configName + ".ingest.createSubmitter"))
                            {
                                sub = EPerson.create(context);
                                sub.setEmail(value);
                                sub.setCanLogIn(false);
                                sub.update();
                            }
                            else
                            {
                                log.warn("Ignoring unknown Submitter=" + value + " in AIP Tech MD, no matching EPerson and 'mets.dspaceAIP.ingest.createSubmitter' is false in dspace.cfg.");
                            }
                        }
                        if (sub != null)
                        {
                            item.setSubmitter(sub);
                        }
                    }
                    else if (dcField.equals("rights.accessRights"))
                    {
                        //check if item is withdrawn
                        if (value.equalsIgnoreCase("WITHDRAWN"))
                        {
                            item.withdraw();
                        }
                    }
                    else if(dcField.equals("identifier.uri") ||
                            dcField.equals("relation.isPartOf"))
                    {
                        // Ignore identifier.uri (which specifies object handle)
                        // and relation.isPartOf (which specifies primary parent object)
                        // Both of these should already be set on object, as they
                        // are required/generated when a DSpaceObject is created.
                    }
                    else if (dcField.equals("relation.isReferencedBy"))
                    {
                        // This Item is referenced by other Collections.  This means
                        // it has been mapped into one or more additional collections.
                        // We'll attempt to map it to all referenced collections.
                        // But if this is a recursive ingest, it is possible some of these
                        // collections may not have been created yet. No need to worry,
                        // when each Collection is created it will create any mappings that
                        // we were unable to create now.
                        String parentHandle = value;
                        if(parentHandle!=null && !parentHandle.isEmpty())
                        {
                            //Remove 'hdl:' prefix, if it exists
                            if (parentHandle.startsWith("hdl:"))
                            {
                                parentHandle = parentHandle.substring(4);
                            }
                            //Get parent object (if it exists)
                            DSpaceObject parentDso = HandleManager.resolveToObject(context, parentHandle);
                            //For Items, this parent *must* be a Collection
                            if(parentDso!=null && parentDso.getType()==Constants.COLLECTION)
                            {
                                Collection collection = (Collection) parentDso;
                                //If this item is not already mapped into this collection, map it!
                                if (!item.isIn(collection))
                                {
                                    collection.addItem(item);
                                }
                            }
                        }
                    }
                    else
                    {
                        log.warn("Got unrecognized DC field for Item: " + dcField);
                    }
                }
                else if (type == Constants.COMMUNITY || type == Constants.COLLECTION)
                {
                    if (dcField.equals("identifier.uri") || dcField.equals("relation.isPartOf"))
                    {
                        // Ignore identifier.uri (which specifies object handle)
                        // and relation.isPartOf (which specifies primary parent object)
                        // Both of these should already be set on object, as they
                        // are required/generated when a DSpaceObject is created.
                    }
                    else if (dcField.equals("relation.isReferencedBy"))
                    {
                        // Ignore relation.isReferencedBy since it only
                        // lists _extra_ mapped parents, not the primary one.
                        // DSpace currently doesn't fully support mapping of Collections/Communities
                    }
                    else
                    {
                        log.warn("Got unrecognized DC field for Collection/Community: " + dcField);
                    }
                }
            }
            else
            {
                log.warn("Skipping DIM field with mdschema=\"" + schema + "\".");
            }
        }
        else
        {
            log.error("Got unexpected element in DIM list: "+field.toString());
            throw new MetadataValidationException("Got unexpected element in DIM list: "+field.toString());
        }
    }

    // final step: find or create bitstream format since it
    // takes the accumulation of a few values:
    if (type == Constants.BITSTREAM && bsfShortName != null)
    {
        BitstreamFormat bsf = BitstreamFormat.findByShortDescription(context, bsfShortName);
        if (bsf == null && bsfMIMEType != null)
        {
            bsf = PackageUtils.findOrCreateBitstreamFormat(context,
                    bsfShortName,
                    bsfMIMEType,
                    bsfShortName,
                    bsfSupport,
                    bsfInternal);
        }
        if (bsf != null)
        {
            ((Bitstream) dso).setFormat(bsf);
        }
        else
        {
            log.warn("Failed to find or create bitstream format named \"" + bsfShortName + "\"");
        }
    }
}
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.content.crosswalk;
import java.io.FileInputStream;
import java.io.IOException;
import java.sql.SQLException;
import java.util.Iterator;
import java.util.List;
import org.apache.commons.lang.ArrayUtils;
import org.apache.log4j.Logger;
import org.dspace.authorize.AuthorizeException;
import org.dspace.content.DSpaceObject;
import org.dspace.content.Collection;
import org.dspace.content.Community;
import org.dspace.content.Item;
import org.dspace.content.MetadataField;
import org.dspace.content.MetadataSchema;
import org.dspace.content.authority.Choices;
import org.dspace.content.packager.PackageUtils;
import org.dspace.core.Constants;
import org.dspace.core.Context;
import org.dspace.core.PluginManager;
import org.jdom.Document;
import org.jdom.Element;
import org.jdom.input.SAXBuilder;
import org.jdom.output.Format;
import org.jdom.output.XMLOutputter;
import org.jdom.transform.XSLTransformException;
import org.jdom.transform.XSLTransformer;
/**
* Configurable XSLT-driven ingestion Crosswalk
* <p>
* See the XSLTCrosswalk superclass for details on configuration.
*
* @author Larry Stone
* @version $Revision: 5844 $
* @see XSLTCrosswalk
*/
public class XSLTIngestionCrosswalk
    extends XSLTCrosswalk
    implements IngestionCrosswalk
{
    /** log4j category */
    private static final Logger log = Logger.getLogger(XSLTIngestionCrosswalk.class);

    /** Direction used to look up the "submission" stylesheet configuration. */
    private static final String DIRECTION = "submission";

    private static String aliases[] = makeAliases(DIRECTION);

    /** @return plugin alias names under which this crosswalk is registered. */
    public static String[] getPluginNames()
    {
        return (String[]) ArrayUtils.clone(aliases);
    }

    // apply metadata values returned in DIM to the target item.
    private static void applyDim(List<Element> dimList, Item item)
        throws MetadataValidationException
    {
        for (Element elt : dimList)
        {
            if ("field".equals(elt.getName()) && DIM_NS.equals(elt.getNamespace()))
            {
                applyDimField(elt, item);
            }
            else if ("dim".equals(elt.getName()) && DIM_NS.equals(elt.getNamespace()))
            {
                // if it's a <dim> container, apply its guts
                applyDim(elt.getChildren(), item);
            }
            else
            {
                log.error("Got unexpected element in DIM list: "+elt.toString());
                throw new MetadataValidationException("Got unexpected element in DIM list: "+elt.toString());
            }
        }
    }

    // adds the metadata element from one <field>
    private static void applyDimField(Element field, Item item)
    {
        String schema = field.getAttributeValue("mdschema");
        String element = field.getAttributeValue("element");
        String qualifier = field.getAttributeValue("qualifier");
        String lang = field.getAttributeValue("lang");
        String authority = field.getAttributeValue("authority");
        String sconf = field.getAttributeValue("confidence");

        // sanity check: some XSL puts an empty string in qualifier,
        // change it to null so we match the unqualified DC field:
        if (qualifier != null && qualifier.equals(""))
        {
            qualifier = null;
        }

        // use the authority-aware variant only when authority/confidence present
        if ((authority != null && authority.length() > 0) ||
            (sconf != null && sconf.length() > 0))
        {
            int confidence = (sconf != null && sconf.length() > 0) ?
                Choices.getConfidenceValue(sconf) : Choices.CF_UNSET;
            item.addMetadata(schema, element, qualifier, lang, field.getText(), authority, confidence);
        }
        else
        {
            item.addMetadata(schema, element, qualifier, lang, field.getText());
        }
    }

    /**
     * Translate metadata with XSL stylesheet and ingest it.
     * Translation produces a list of DIM "field" elements;
     * these correspond directly to Item.addMetadata() calls so
     * they are simply executed.
     */
    @Override
    public void ingest(Context context, DSpaceObject dso, List<Element> metadata)
        throws CrosswalkException,
               IOException, SQLException, AuthorizeException
    {
        XSLTransformer xform = getTransformer(DIRECTION);
        if (xform == null)
        {
            throw new CrosswalkInternalException("Failed to initialize transformer, probably error loading stylesheet.");
        }
        try
        {
            List dimList = xform.transform(metadata);
            ingestDIM(context, dso, dimList);
        }
        catch (XSLTransformException e)
        {
            log.error("Got error: "+e.toString());
            throw new CrosswalkInternalException("XSL Transformation failed: "+e.toString(), e);
        }
    }

    /**
     * Ingest a whole document.  Build Document object around root element,
     * and feed that to the transformation, since it may get handled
     * differently than a List of metadata elements.
     */
    @Override
    public void ingest(Context context, DSpaceObject dso, Element root)
        throws CrosswalkException, IOException, SQLException, AuthorizeException
    {
        XSLTransformer xform = getTransformer(DIRECTION);
        if (xform == null)
        {
            throw new CrosswalkInternalException("Failed to initialize transformer, probably error loading stylesheet.");
        }
        try
        {
            // clone so the root can be wrapped in a fresh Document for the XSLT
            Document dimDoc = xform.transform(new Document((Element)root.clone()));
            ingestDIM(context, dso, dimDoc.getRootElement().getChildren());
        }
        catch (XSLTransformException e)
        {
            log.error("Got error: "+e.toString());
            throw new CrosswalkInternalException("XSL Transformation failed: "+e.toString(), e);
        }
    }

    // return coll/comm "metadata" label corresponding to a DIM field.
    private static String getMetadataForDIM(Element field)
    {
        // make up fieldname, then look for it in xwalk
        String element = field.getAttributeValue("element");
        String qualifier = field.getAttributeValue("qualifier");
        String fname = "dc." + element;
        if (qualifier != null)
        {
            fname += "." + qualifier;
        }
        return PackageUtils.dcToContainerMetadata(fname);
    }

    /**
     * Ingest a DIM metadata expression directly, without
     * translating some other format into DIM.
     * The <code>dim</code> element is expected to be be the root of
     * a DIM document.
     * <p>
     * Note that this is ONLY implemented for Item, Collection, and
     * Community objects.  Also only works for the "dc" metadata schema.
     * <p>
     * @param context the context
     * @param dso object into which to ingest metadata
     * @param dim root of a DIM expression
     */
    public static void ingestDIM(Context context, DSpaceObject dso, Element dim)
        throws CrosswalkException,
               IOException, SQLException, AuthorizeException
    {
        ingestDIM(context, dso, dim.getChildren());
    }

    /**
     * Ingest a list of DIM <field> (or nested <dim>) elements into an
     * Item, Collection, or Community.
     *
     * @param context the context
     * @param dso     object into which to ingest metadata
     * @param fields  DIM field elements to apply
     * @throws CrosswalkObjectNotSupported for any other object type
     */
    public static void ingestDIM(Context context, DSpaceObject dso, List<Element> fields)
        throws CrosswalkException,
               IOException, SQLException, AuthorizeException
    {
        int type = dso.getType();
        if (type == Constants.ITEM)
        {
            Item item = (Item)dso;
            applyDim(fields, item);
        }
        else if (type == Constants.COLLECTION ||
                 type == Constants.COMMUNITY)
        {
            for (Element field : fields)
            {
                String schema = field.getAttributeValue("mdschema");
                if ("dim".equals(field.getName()) && DIM_NS.equals(field.getNamespace()))
                {
                    ingestDIM(context, dso, field.getChildren());
                }
                else if ("field".equals(field.getName()) &&
                         DIM_NS.equals(field.getNamespace()) &&
                         "dc".equals(schema))  // null-safe; covers missing mdschema
                {
                    String md = getMetadataForDIM(field);
                    if (md == null)
                    {
                        log.warn("Cannot map to Coll/Comm metadata field, DIM element=" +
                            field.getAttributeValue("element") + ", qualifier=" + field.getAttributeValue("qualifier"));
                    }
                    else
                    {
                        if (type == Constants.COLLECTION)
                        {
                            ((Collection) dso).setMetadata(md, field.getText());
                        }
                        else
                        {
                            ((Community) dso).setMetadata(md, field.getText());
                        }
                    }
                }
                else
                {
                    log.warn("ignoring unrecognized DIM element: " + field.toString());
                }
            }
        }
        else
        {
            // FIX: previous message named the wrong class and claimed only
            // Items were supported, although Collections/Communities are too.
            throw new CrosswalkObjectNotSupported("XSLTIngestionCrosswalk can only crosswalk to an Item, Collection, or Community.");
        }
    }

    /**
     * Simple command-line rig for testing the DIM output of a stylesheet.
     * Usage:  java XSLTIngestionCrosswalk  <crosswalk-name> <input-file>
     */
    public static void main(String[] argv) throws Exception
    {
        if (argv.length < 2)
        {
            System.err.println("Usage:  java XSLTIngestionCrosswalk [-l] <crosswalk-name> <input-file>");
            System.exit(1);
        }

        int i = 0;
        boolean list = false;
        // skip first arg if it's the list option
        if (argv.length > 2 && argv[0].equals("-l"))
        {
            ++i;
            list = true;
        }
        IngestionCrosswalk xwalk = (IngestionCrosswalk)PluginManager.getNamedPlugin(
                IngestionCrosswalk.class, argv[i]);
        if (xwalk == null)
        {
            System.err.println("Error, cannot find an IngestionCrosswalk plugin for: \""+argv[i]+"\"");
            System.exit(1);
        }

        XSLTransformer xform = ((XSLTIngestionCrosswalk)xwalk).getTransformer(DIRECTION);
        if (xform == null)
        {
            throw new CrosswalkInternalException("Failed to initialize transformer, probably error loading stylesheet.");
        }

        SAXBuilder builder = new SAXBuilder();
        Document inDoc = builder.build(new FileInputStream(argv[i+1]));
        XMLOutputter outputter = new XMLOutputter(Format.getPrettyFormat());
        Document dimDoc = null;
        List dimList = null;
        if (list)
        {
            dimList = xform.transform(inDoc.getRootElement().getChildren());
            outputter.output(dimList, System.out);
        }
        else
        {
            dimDoc = xform.transform(inDoc);
            outputter.output(dimDoc, System.out);
            dimList = dimDoc.getRootElement().getChildren();
        }

        // Sanity-check the generated DIM, make sure it would load.
        Context context = new Context();
        Iterator di = dimList.iterator();
        while (di.hasNext())
        {
            // skip over comment, text and other trash some XSLs generate..
            Object o = di.next();
            if (!(o instanceof Element))
            {
                continue;
            }

            Element elt = (Element)o;
            if (elt.getName().equals("field") && elt.getNamespace().equals(DIM_NS))
            {
                String schema = elt.getAttributeValue("mdschema");
                String element = elt.getAttributeValue("element");
                String qualifier = elt.getAttributeValue("qualifier");
                MetadataSchema ms = MetadataSchema.find(context, schema);
                if (ms == null )
                {
                    System.err.println("DIM Error, Cannot find metadata schema for: schema=\""+schema+
                        "\" (... element=\""+element+"\", qualifier=\""+qualifier+"\")");
                }
                else
                {
                    if (qualifier != null && qualifier.equals(""))
                    {
                        System.err.println("DIM Warning, qualifier is empty string: "+
                            " schema=\""+schema+"\", element=\""+element+"\", qualifier=\""+qualifier+"\"");
                        qualifier = null;
                    }
                    MetadataField mf = MetadataField.findByElement(context,
                            ms.getSchemaID(), element, qualifier);
                    if (mf == null)
                    {
                        System.err.println("DIM Error, Cannot find metadata field for: schema=\"" + schema +
                            "\", element=\"" + element + "\", qualifier=\"" + qualifier + "\"");
                    }
                }
            }
            else
            {
                // ("Got unexpected element in DIM list: "+elt.toString());
                throw new MetadataValidationException("Got unexpected element in DIM list: "+elt.toString());
            }
        }
        // FIX: release the DB connection held by the read-only validation context
        context.abort();
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.content.crosswalk;
import java.io.IOException;
import java.io.OutputStream;
import java.sql.SQLException;
import org.dspace.authorize.AuthorizeException;
import org.dspace.content.DSpaceObject;
import org.dspace.core.Context;
/**
* A class implementing this interface crosswalk metadata directly
* from a DSpace Object to an output stream, in a specific format.
* <p>
* Stream-oriented crosswalks are intended to be used for metadata
* formats which are either (a) not XML-based, or (b) too bulky for the
* DOM-ish in-memory model developed for the METS and IMSCP packagers.
* The METS packagers (all subclasses of AbstractMETSDisseminator / AbstractMETSIngester
* are equipped to call these crosswalks as well as the XML-based ones,
* just refer to the desired crosswalk by its plugin name.
*
* @author Larry Stone
* @version $Revision: 5844 $
*/
public interface StreamDisseminationCrosswalk
{
    /**
     * Predicate: Can this disseminator crosswalk the given object.
     *
     * @param context the DSpace context
     * @param dso dspace object, e.g. an <code>Item</code>.
     * @return true when disseminator is capable of producing metadata.
     */
    public boolean canDisseminate(Context context, DSpaceObject dso);

    /**
     * Execute crosswalk on the given object, sending output to the stream.
     *
     * @param context the DSpace context
     * @param dso the  DSpace Object whose metadata to export.
     * @param out output stream to write to
     *
     * @throws CrosswalkInternalException (<code>CrosswalkException</code>) failure of the crosswalk itself.
     * @throws CrosswalkObjectNotSupported (<code>CrosswalkException</code>) Cannot crosswalk this kind of DSpace object.
     * @throws IOException  I/O failure in services this calls
     * @throws SQLException  Database failure in services this calls
     * @throws AuthorizeException current user not authorized for this operation.
     */
    public void disseminate(Context context, DSpaceObject dso, OutputStream out)
        throws CrosswalkException, IOException, SQLException, AuthorizeException;

    /**
     * @return the MIME type (e.g. "text/xml") of the content this
     *         crosswalk writes to the output stream in disseminate().
     */
    public String getMIMEType();
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.content.crosswalk;
import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.StringReader;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.lang.ArrayUtils;
import org.apache.log4j.Logger;
import org.dspace.authorize.AuthorizeException;
import org.dspace.content.DCValue;
import org.dspace.content.DSpaceObject;
import org.dspace.content.Collection;
import org.dspace.content.Community;
import org.dspace.content.Item;
import org.dspace.content.Site;
import org.dspace.core.ConfigurationManager;
import org.dspace.core.Constants;
import org.dspace.core.SelfNamedPlugin;
import org.jdom.Element;
import org.jdom.Namespace;
import org.jdom.Verifier;
import org.jdom.input.SAXBuilder;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.util.LinkedHashMap;
import java.util.Map.Entry;
import proj.oceandocs.utils.utilsXML;
/**
* Configurable MODS Crosswalk
* <p>
* This class supports multiple dissemination crosswalks from DSpace
* internal data to the MODS XML format
* (see <a href="http://www.loc.gov/standards/mods/">http://www.loc.gov/standards/mods/</a>.)
* <p>
* It registers multiple Plugin names, which it reads from
* the DSpace configuration as follows:
*
* <h3>Configuration</h3>
* Every key starting with <code>"crosswalk.mods.properties."</code> describes a
* MODS crosswalk. Everything after the last period is the <em>plugin name</em>,
* and the value is the pathname (relative to <code><em>dspace.dir</em>/config</code>)
* of the crosswalk configuration file.
* <p>
* You can have two names point to the same crosswalk,
* just add two configuration entries with the same value, e.g.
* <pre>
* crosswalk.mods.properties.MODS = crosswalks/mods.properties
* crosswalk.mods.properties.default = crosswalks/mods.properties
* </pre>
* The first line creates a plugin with the name <code>"MODS"</code>
* which is configured from the file <em>dspace-dir</em><code>/config/crosswalks/mods.properties</code>.
* <p>
* Since there is significant overhead in reading the properties file to
* configure the crosswalk, and a crosswalk instance may be used any number
* of times, we recommend caching one instance of the crosswalk for each
* name and simply reusing those instances. The PluginManager does this
* by default.
*
* @author Larry Stone
* @author Scott Phillips
* @version $Revision: 5844 $
*/
public class MODSDisseminationCrosswalk extends SelfNamedPlugin
implements DisseminationCrosswalk
{
/** log4j category */
private static Logger log = Logger.getLogger(MODSDisseminationCrosswalk.class);
private static final String CONFIG_PREFIX = "crosswalk.mods.properties.";
/**
 * Fill in the plugin alias table from DSpace configuration entries
 * for configuration files for flavors of MODS crosswalk:
 */
private static String aliases[] = null;

// Scan all configuration keys once at class-load time; every key of the
// form "crosswalk.mods.properties.<name>" registers a plugin alias <name>.
static
{
    List<String> aliasList = new ArrayList<String>();
    Enumeration<String> pe = (Enumeration<String>) ConfigurationManager.propertyNames();
    while (pe.hasMoreElements())
    {
        String key = pe.nextElement();
        if (key.startsWith(CONFIG_PREFIX))
        {
            // the alias is everything after the configuration prefix
            aliasList.add(key.substring(CONFIG_PREFIX.length()));
        }
    }
    aliases = (String[]) aliasList.toArray(new String[aliasList.size()]);
}
/** @return a defensive copy of the configured plugin alias names. */
public static String[] getPluginNames()
{
    String[] copy = (String[]) ArrayUtils.clone(aliases);
    return copy;
}
/**
* MODS namespace.
*/
public static final Namespace MODS_NS = Namespace.getNamespace("mods", "http://www.loc.gov/mods/v3");
private static final Namespace XLINK_NS = Namespace.getNamespace("xlink", "http://www.w3.org/1999/xlink");
private static final Namespace namespaces[] =
{
MODS_NS, XLINK_NS
};
/** URL of MODS XML Schema */
public static final String MODS_XSD = "http://www.loc.gov/standards/mods/v3/mods-3-3.xsd";
private static final String schemaLocation = MODS_NS.getURI() + " " + MODS_XSD;
private static SAXBuilder builder = new SAXBuilder();
private LinkedHashMap<String, String> modsMap = null;
private Map<String, String> groupingLimits = null;
/**
 * Initialize Crosswalk table from a properties file
 * which itself is the value of the DSpace configuration property
 * "crosswalk.mods.properties.X", where "X" is the alias name of this instance.
 * Each instance may be configured with a separate mapping table.
 *
 * The MODS crosswalk configuration properties follow the format:
 *
 *   {field-name} = {XML-fragment}
 *
 *  1. qualified DC field name is of the form
 *       {MDschema}.{element}.{qualifier}
 *
 *      e.g.  dc.contributor.author
 *
 *  2. XML fragment is prototype of metadata element, with empty or "%s", "%a", "%l"
 *     placeholders for value(s), authority value(s), language attribute value(s).
 *
 *  Example properties line:
 *
 *  dc.description.abstract = <mods:abstract>%s</mods:abstract>
 *
 * @throws CrosswalkInternalException if the configuration file is missing or unreadable
 */
private void initMap() throws CrosswalkInternalException
{
    // already initialized? (map is built once per plugin instance)
    if (modsMap != null)
    {
        return;
    }
    String myAlias = getPluginInstanceName();
    if (myAlias == null)
    {
        log.error("Must use PluginManager to instantiate MODSDisseminationCrosswalk so the class knows its name.");
        return;
    }
    String cmPropName = CONFIG_PREFIX + myAlias;
    String propsFilename = ConfigurationManager.getProperty(cmPropName);
    if (propsFilename == null)
    {
        String msg = "MODS crosswalk missing "
            + "configuration file for crosswalk named \"" + myAlias + "\"";
        log.error(msg);
        throw new CrosswalkInternalException(msg);
    }
    else
    {
        String parent = ConfigurationManager.getProperty("dspace.dir")
            + File.separator + "config" + File.separator;
        File propsFile = new File(parent, propsFilename);
        BufferedReader br = null;
        try
        {
            br = new BufferedReader(new FileReader(propsFile));
            modsMap = new LinkedHashMap<String, String>();
            groupingLimits = new HashMap<String, String>();
            String[] props;
            String line;
            while ((line = br.readLine()) != null)
            {
                line = line.trim();
                // skip comments and blank lines
                if (!line.startsWith("#") && !line.equals(""))
                {
                    props = line.split("\\s+=\\s+");
                    if (props.length == 2)
                    {
                        String qdc = props[0].trim();
                        String val = props[1].trim();
                        // an optional " | " suffix carries a grouping limit
                        String pair[] = val.split("\\s+\\|\\s+", 2);
                        if (pair.length < 1)
                        {
                            log.warn("Illegal MODS mapping in " + propsFile.toString() + ", line = "
                                + qdc + " = " + val);
                        }
                        else
                        {
                            modsMap.put(qdc, pair[0]);
                            if (pair.length >= 2 && (!"".equals(pair[1])))
                            {
                                groupingLimits.put(qdc, pair[1].trim());
                            }
                        }
                    }
                }
            }
        }
        catch (Exception e)
        {
            log.error("Error opening or reading MODS properties file: " + propsFile.toString() + ": " + e.toString());
            throw new CrosswalkInternalException("MODS crosswalk cannot "
                + "open config file: " + e.toString());
        }
        finally
        {
            // FIX: the reader was previously never closed (file-handle leak)
            if (br != null)
            {
                try
                {
                    br.close();
                }
                catch (IOException ioe)
                {
                    log.error("Error closing MODS properties file: " + ioe.toString());
                }
            }
        }
    }
}
/**
 * Return the namespaces used by this crosswalk (MODS plus xlink),
 * as a defensive copy.
 */
@Override
public Namespace[] getNamespaces()
{
    Namespace[] copy = new Namespace[namespaces.length];
    System.arraycopy(namespaces, 0, copy, 0, namespaces.length);
    return copy;
}
/**
 * Return the MODS xsi:schemaLocation value (namespace URI + XSD URL).
 */
@Override
public String getSchemaLocation()
{
    return schemaLocation;
}
/**
 * Expand each configured MODS template with the object's metadata values
 * and parse the results into XML elements, keyed by qualified DC field name.
 * Placeholders: %s = value, %a = authority, %l = language; additionally
 * "$dc.element.qualifier|s$"-style tokens pull the first value of another
 * field into the template.
 *
 * @param metadata DC values grouped by qualified field name (e.g. "dc.title")
 * @return map from field name to the parsed template elements for its values
 */
private Map<String, ArrayList<Element>> prepareTags(Map<String, ArrayList<DCValue>> metadata)
{
    //$dc.element.qualifier|s$ like constructions will be replased by value of apropriate field
    Pattern p = Pattern.compile("\\$(\\w+.\\w+.\\w+)\\|([s,a,l])\\$", Pattern.CASE_INSENSITIVE);
    Matcher m;
    DCValue tempDCV;
    Map<String, ArrayList<Element>> result = new LinkedHashMap<String, ArrayList<Element>>();
    for (String field : modsMap.keySet())
    {
        if (metadata.containsKey(field))
        {
            ArrayList<Element> elements = new ArrayList<Element>();
            for (DCValue dcv : metadata.get(field))
            {
                StringBuffer sb = new StringBuffer();
                String template = modsMap.get(field);
                template = template.replace("%s", dcv.value != null ? dcv.value : "");
                template = template.replace("%a", dcv.authority != null ? dcv.authority : "");
                template = template.replace("%l", dcv.language != null ? dcv.language : "");
                // drop empty language attributes left by the %l substitution
                template = template.replace("xml:lang=\"\"", "");
                m = p.matcher(template);
                while (m.find())
                {
                    // FIX: reset per match — previously a stale value from an
                    // earlier match could leak into this replacement.
                    String subst = "";
                    if (m.groupCount() == 2)
                    {
                        tempDCV = metadata.get(m.group(1)) != null ? metadata.get(m.group(1)).get(0) : null;
                        if (tempDCV != null)
                        {
                            if ("s".equalsIgnoreCase(m.group(2)))
                            {
                                subst = tempDCV.value != null ? tempDCV.value : "";
                            }
                            else if ("a".equalsIgnoreCase(m.group(2)))
                            {
                                subst = tempDCV.authority != null ? tempDCV.authority : "";
                            }
                            else if ("l".equalsIgnoreCase(m.group(2)))
                            {
                                subst = tempDCV.language != null ? tempDCV.language : "";
                            }
                            // FIX: quote so '$' or '\' inside metadata values are
                            // inserted literally instead of being interpreted as
                            // regex group references (which threw at runtime).
                            m.appendReplacement(sb, Matcher.quoteReplacement(subst));
                        }
                        else
                        {
                            m.appendReplacement(sb, "");
                        }
                    }
                }
                m.appendTail(sb);
                try
                {
                    Element tempRoot = builder.build(new StringReader((sb.toString()))).getRootElement();
                    elements.add(tempRoot);
                }
                catch (Exception e)
                {
                    log.error("AGRISDisseminationCrosswalk error: " + e.getLocalizedMessage());
                }
            }
            result.put(field, elements);
        }
    }
    return result;
}
/**
 * Returns object's metadata in MODS format, as List of XML structure nodes.
 * Not supported by this crosswalk: MODS output is only produced as a single
 * element tree via {@link #disseminateElement}.
 */
@Override
public List<Element> disseminateList(DSpaceObject dso) throws CrosswalkException,
    IOException, SQLException, AuthorizeException
{
    throw new UnsupportedOperationException("MODS dissemination as list of mods tags not applicable.");
}
/**
 * Disseminate an Item, Collection, Community, or Site to MODS.
 * Builds a <modsCollection> root, derives DC values from the object,
 * groups them by qualified field name, and merges the expanded MODS
 * templates into a single <mods> child.
 *
 * @param dso object to disseminate
 * @return root <modsCollection> element (possibly partial if merging failed)
 * @throws CrosswalkObjectNotSupported for unsupported object types
 */
@Override
public Element disseminateElement(DSpaceObject dso)
    throws CrosswalkException,
           IOException, SQLException, AuthorizeException
{
    Element root = new Element("modsCollection");
    root.addNamespaceDeclaration(XLINK_NS);
    root.addNamespaceDeclaration(XSI_NS);
    root.setAttribute("schemaLocation", schemaLocation, XSI_NS);

    DCValue[] dcvs = null;
    if (dso.getType() == Constants.ITEM)
    {
        dcvs = item2Metadata((Item) dso);
    }
    else if (dso.getType() == Constants.COLLECTION)
    {
        dcvs = collection2Metadata((Collection) dso);
    }
    else if (dso.getType() == Constants.COMMUNITY)
    {
        dcvs = community2Metadata((Community) dso);
    }
    else if (dso.getType() == Constants.SITE)
    {
        dcvs = site2Metadata((Site) dso);
    }
    else
    {
        throw new CrosswalkObjectNotSupported(
            "MODSDisseminationCrosswalk can only crosswalk Items, Collections, or Communities");
    }
    initMap();

    // group the flat DCValue array by qualified field name ("schema.element[.qualifier]")
    HashMap<String, ArrayList<DCValue>> itemDCVs = new HashMap<String, ArrayList<DCValue>>();
    for (int i = 0; i < dcvs.length; i++)
    {
        String qdc = dcvs[i].schema + "." + dcvs[i].element;
        if (dcvs[i].qualifier != null)
        {
            qdc += "." + dcvs[i].qualifier;
        }
        if (!itemDCVs.containsKey(qdc))
        {
            ArrayList<DCValue> al = new ArrayList<DCValue>();
            al.add(dcvs[i]);
            itemDCVs.put(qdc, al);
        }
        else
        {
            itemDCVs.get(qdc).add(dcvs[i]);
        }
    }

    Map<String, ArrayList<Element>> tags = prepareTags(itemDCVs);
    try
    {
        Element mods = new Element("mods");
        mods.setAttribute("version", "3.3");
        root.getChildren().add(mods);
        for (Entry<String, ArrayList<Element>> kvp : tags.entrySet())
        {
            String curKey = kvp.getKey();
            String field = groupingLimits.get(curKey);
            for (Element e : kvp.getValue())
            {
                utilsXML.mergeXMLTrees(mods, e, field);
            }
        }
    }
    catch (Exception e)
    {
        // on merge failure, log and fall through to return the partial tree
        log.error(getPluginInstanceName() + ": " + e.getLocalizedMessage());
    }
    // FIX: return moved out of the former finally block — returning from
    // finally silently discarded any propagating Throwable.
    return root;
}
/**
 * ModsCrosswalk can disseminate: Items, Collections, Communities, and Site.
 */
@Override
public boolean canDisseminate(DSpaceObject dso)
{
    switch (dso.getType())
    {
        case Constants.ITEM:
        case Constants.COLLECTION:
        case Constants.COMMUNITY:
        case Constants.SITE:
            return true;
        default:
            return false;
    }
}
/**
 * ModsCrosswalk prefers the single-element form over a list of elements.
 */
@Override
public boolean preferList()
{
    // dissemination always produces one <modsCollection> root element
    return false;
}
/**
 * Generate a list of metadata elements for the given DSpace
 * site.
 *
 * @param site The site to derive metadata from
 * @return DC values describing the site
 */
protected DCValue[] site2Metadata(Site site)
{
    List<DCValue> md = new ArrayList<DCValue>();

    String handleUri = "http://hdl.handle.net/" + site.getHandle();
    String siteName = site.getName();
    String siteUrl = site.getURL();

    if (handleUri != null)
    {
        md.add(createDCValue("identifier.uri", null, handleUri));
    }
    //FIXME: adding two URIs for now (site handle and URL), in case site isn't using handles
    if (siteUrl != null)
    {
        md.add(createDCValue("identifier.uri", null, siteUrl));
    }
    if (siteName != null)
    {
        md.add(createDCValue("title", null, siteName));
    }
    return (DCValue[]) md.toArray(new DCValue[md.size()]);
}
/**
 * Generate a list of metadata elements for the given DSpace
 * community.
 *
 * @param community The community to derive metadata from
 * @return DC values describing the community
 */
protected DCValue[] community2Metadata(Community community)
{
    List<DCValue> md = new ArrayList<DCValue>();

    // pull the container's descriptive fields into local DC-ish values
    String intro = community.getMetadata("introductory_text");
    String shortDesc = community.getMetadata("short_description");
    String sideBar = community.getMetadata("side_bar_text");
    String copyrightText = community.getMetadata("copyright_text");
    String communityName = community.getMetadata("name");
    String handleUri = "http://hdl.handle.net/" + community.getHandle();

    if (intro != null)
    {
        md.add(createDCValue("description", null, intro));
    }
    if (shortDesc != null)
    {
        md.add(createDCValue("description", "abstract", shortDesc));
    }
    if (sideBar != null)
    {
        md.add(createDCValue("description", "tableofcontents", sideBar));
    }
    if (handleUri != null)
    {
        md.add(createDCValue("identifier.uri", null, handleUri));
    }
    if (copyrightText != null)
    {
        md.add(createDCValue("rights", null, copyrightText));
    }
    if (communityName != null)
    {
        md.add(createDCValue("title", null, communityName));
    }
    return (DCValue[]) md.toArray(new DCValue[md.size()]);
}
/**
 * Generate a list of metadata elements for the given DSpace
 * collection.
 *
 * @param collection The collection to derive metadata from
 * @return DC values describing the collection
 */
protected DCValue[] collection2Metadata(Collection collection)
{
    List<DCValue> md = new ArrayList<DCValue>();

    // pull the container's descriptive fields into local DC-ish values
    String intro = collection.getMetadata("introductory_text");
    String shortDesc = collection.getMetadata("short_description");
    String sideBar = collection.getMetadata("side_bar_text");
    String provenanceDesc = collection.getMetadata("provenance_description");
    String copyrightText = collection.getMetadata("copyright_text");
    String licenseText = collection.getMetadata("license");
    String collectionName = collection.getMetadata("name");
    String handleUri = "http://hdl.handle.net/" + collection.getHandle();

    if (intro != null)
    {
        md.add(createDCValue("description", null, intro));
    }
    if (shortDesc != null)
    {
        md.add(createDCValue("description", "abstract", shortDesc));
    }
    if (sideBar != null)
    {
        md.add(createDCValue("description", "tableofcontents", sideBar));
    }
    if (handleUri != null)
    {
        md.add(createDCValue("identifier", "uri", handleUri));
    }
    if (provenanceDesc != null)
    {
        md.add(createDCValue("provenance", null, provenanceDesc));
    }
    if (copyrightText != null)
    {
        md.add(createDCValue("rights", null, copyrightText));
    }
    if (licenseText != null)
    {
        md.add(createDCValue("rights.license", null, licenseText));
    }
    if (collectionName != null)
    {
        md.add(createDCValue("title", null, collectionName));
    }
    return (DCValue[]) md.toArray(new DCValue[md.size()]);
}
/**
* Generate a list of metadata elements for the given DSpace item.
*
* @param item
* The item to derive metadata from
*/
protected DCValue[] item2Metadata(Item item)
{
DCValue[] dcvs = item.getMetadata(Item.ANY, Item.ANY, Item.ANY,
Item.ANY);
return dcvs;
}
private DCValue createDCValue(String element, String qualifier, String value)
{
DCValue dcv = new DCValue();
dcv.schema = "dc";
dcv.element = element;
dcv.qualifier = qualifier;
dcv.value = value;
return dcv;
}
// check for non-XML characters
private String checkedString(String value)
{
if (value == null)
{
return null;
}
String reason = Verifier.checkCharacterData(value);
if (reason == null)
{
return value;
} else
{
if (log.isDebugEnabled())
{
log.debug("Filtering out non-XML characters in string, reason=" + reason);
}
StringBuilder result = new StringBuilder(value.length());
for (int i = 0; i < value.length(); ++i)
{
char c = value.charAt(i);
if (Verifier.isXMLCharacter((int) c))
{
result.append(c);
}
}
return result.toString();
}
}
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.content.crosswalk;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.StringReader;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.commons.lang.ArrayUtils;
import org.apache.log4j.Logger;
import org.dspace.authorize.AuthorizeException;
import org.dspace.content.DCValue;
import org.dspace.content.DSpaceObject;
import org.dspace.content.Item;
import org.dspace.content.MetadataSchema;
import org.dspace.core.ConfigurationManager;
import org.dspace.core.Constants;
import org.dspace.core.Context;
import org.dspace.core.SelfNamedPlugin;
import org.jdom.Document;
import org.jdom.Element;
import org.jdom.Namespace;
import org.jdom.input.SAXBuilder;
/**
* Configurable QDC Crosswalk
* <p>
* This class supports multiple dissemination crosswalks from DSpace
* internal data to the Qualified Dublin Core XML format
 * (see <a href="http://dublincore.org/">http://dublincore.org/</a>)
* <p>
* It registers multiple Plugin names, which it reads from
* the DSpace configuration as follows:
*
* <h3>Configuration</h3>
* Every key starting with <code>"crosswalk.qdc.properties."</code> describes a
* QDC crosswalk. Everything after the last period is the <em>plugin instance</em>,
* and the value is the pathname (relative to <code><em>dspace.dir</em>/config</code>)
* of the crosswalk configuration file.
* <p>
* You can have two aliases point to the same crosswalk,
* just add two configuration entries with the same value, e.g.
* <pre>
* crosswalk.qdc.properties.QDC = xwalk/qdc.properties
* crosswalk.qdc.properties.default = xwalk/qdc.properties
* </pre>
* The first line creates a plugin with the name <code>"QDC"</code>
* which is configured from the file <em>dspace-dir</em><code>/xwalk/qdc.properties</code>.
* <p>
* Since there is significant overhead in reading the properties file to
* configure the crosswalk, and a crosswalk instance may be used any number
* of times, we recommend caching one instance of the crosswalk for each
* alias and simply reusing those instances. The PluginManager does
* this by default.
* <p>
* Each named crosswalk has two other types of configuration lines:
* <p>
* XML Namespaces: all XML namespace prefixes used in the XML fragments below
* <em>must</em> be defined in the configuration as follows. Add a line of
* the form: <pre>
* crosswalk.qdc.namespace.{NAME}.{prefix} = {namespace-URI}</pre>
* e.g. for the namespaces <code>dc</code> and <code>dcterms</code>
* in the plugin named <code>QDC</code>, add these lines:
* <pre>crosswalk.qdc.namespace.QDC.dc = http://purl.org/dc/elements/1.1/
* crosswalk.qdc.namespace.QDC.dcterms = http://purl.org/dc/terms/</pre>
*
* <p>
* Finally, you need to declare an XML Schema URI for the plugin, with
* a line of the form <pre>
* crosswalk.qdc.schema.{NAME} = {schema-URI}</pre>
* for example,
* <pre>crosswalk.qdc.schemaLocation.QDC = \
* http://purl.org/dc/terms/ \
* http://dublincore.org/schemas/xmls/qdc/2003/04/02/qualifieddc.xsd</pre>
*
* @author Larry Stone
* @version $Revision: 5844 $
*/
public class QDCCrosswalk extends SelfNamedPlugin
    implements DisseminationCrosswalk, IngestionCrosswalk
{
    /** log4j category */
    private static Logger log = Logger.getLogger(QDCCrosswalk.class);

    /** Map of QDC field name (e.g. "dc.title") to its prototype JDOM element. */
    private Map<String, Element> qdc2element = new HashMap<String, Element>();

    /** Reverse map: qualified XML tag name (see makeQualifiedTagName) to QDC field name. */
    private Map<String, String> element2qdc = new HashMap<String, String>();

    /** The XML namespaces from the config file for this instance name. */
    private Namespace namespaces[] = null;

    private static final Namespace DCTERMS_NS =
        Namespace.getNamespace("dcterms", "http://purl.org/dc/terms/");

    // sentinel: has lazy initialization (init()) run yet?
    private boolean inited = false;

    // plugin instance name (the alias this instance was created under)
    private String myName = null;

    // prefix of all DSpace Configuration entries.
    private static final String CONFIG_PREFIX = "crosswalk.qdc";

    // XML schemaLocation fragment for this crosswalk, from config.
    private String schemaLocation = null;

    private static SAXBuilder builder = new SAXBuilder();

    /**
     * Fill in the plugin-name table from DSpace configuration entries
     * for configuration files for flavors of QDC crosswalk:
     */
    private static String aliases[] = null;

    static
    {
        List<String> aliasList = new ArrayList<String>();
        Enumeration<String> pe = (Enumeration<String>)ConfigurationManager.propertyNames();
        String propname = CONFIG_PREFIX + ".properties.";
        while (pe.hasMoreElements())
        {
            String key = pe.nextElement();
            if (key.startsWith(propname))
            {
                // the part after the prefix is the plugin alias
                aliasList.add(key.substring(propname.length()));
            }
        }
        aliases = (String[])aliasList.toArray(new String[aliasList.size()]);
    }

    /** @return the plugin aliases discovered in the DSpace configuration */
    public static String[] getPluginNames()
    {
        return (String[]) ArrayUtils.clone(aliases);
    }

    /**
     * Utility: return "fully qualified" name of an XML element, used as a
     * hashtable key when ingesting elements.
     * Format is {prefix:}name{qualifier} where prefix and qualifier are
     * optional; the qualifier comes from an xsi:type or plain type attribute.
     */
    private String makeQualifiedTagName(Element element)
    {
        String prefix = "";
        Namespace ns = element.getNamespace();
        if (ns != null)
        {
            prefix = ns.getPrefix() + ":";
        }

        String tagName;
        // prefer the namespaced xsi:type attribute, fall back to a bare "type"
        String nsQualifier = element.getAttributeValue("type", DisseminationCrosswalk.XSI_NS);
        if (nsQualifier == null || nsQualifier.length() < 1)
        {
            String qualifier = element.getAttributeValue("type");
            if (qualifier == null || qualifier.length() < 1)
            {
                tagName = prefix+element.getName();
            }
            else
            {
                tagName = prefix+element.getName()+qualifier;
            }
        }
        else
        {
            tagName = prefix+element.getName()+nsQualifier;
        }
        return tagName;
    }

    /**
     * Initialize the crosswalk table from a properties file
     * which itself is the value of the DSpace configuration property
     * "crosswalk.qdc.properties.X", where "X" is the alias name of this instance.
     * Each instance may be configured with a separate mapping table.
     *
     * The QDC crosswalk configuration properties follow the format:
     *
     *   {qdc-element} = {XML-fragment}
     *
     * 1. qualified DC field name is of the form (qualifier is optional)
     *      {MDschema}.{element}.{qualifier}
     *    e.g. dc.contributor.author, dc.title
     *
     * 2. XML fragment is a prototype of the metadata element, with an empty
     *    placeholder for the value, e.g.
     *      dc.coverage.temporal = &lt;dcterms:temporal /&gt;
     *
     * Idempotent: only the first call does any work.
     *
     * @throws CrosswalkInternalException on missing plugin name, missing
     *         configuration, or an unparsable XML fragment
     * @throws IOException if the properties file cannot be read
     */
    private void init()
        throws CrosswalkException, IOException
    {
        if (inited)
        {
            return;
        }
        inited = true;
        myName = getPluginInstanceName();
        if (myName == null)
        {
            throw new CrosswalkInternalException("Cannot determine plugin name, " +
                "You must use PluginManager to instantiate QDCCrosswalk so the instance knows its name.");
        }

        // grovel DSpace configuration for namespaces
        List<Namespace> nsList = new ArrayList<Namespace>();
        Enumeration<String> pe = (Enumeration<String>)ConfigurationManager.propertyNames();
        String propname = CONFIG_PREFIX + ".namespace."+ myName +".";
        while (pe.hasMoreElements())
        {
            String key = pe.nextElement();
            if (key.startsWith(propname))
            {
                // key suffix is the namespace prefix, value is the URI
                nsList.add(Namespace.getNamespace(key.substring(propname.length()),
                        ConfigurationManager.getProperty(key)));
            }
        }
        nsList.add(Namespace.XML_NAMESPACE);
        namespaces = (Namespace[])nsList.toArray(new Namespace[nsList.size()]);

        // get XML schemaLocation fragment from config
        schemaLocation = ConfigurationManager.getProperty(CONFIG_PREFIX + ".schemaLocation."+ myName);

        // read properties
        String cmPropName = CONFIG_PREFIX+".properties."+myName;
        String propsFilename = ConfigurationManager.getProperty(cmPropName);
        if (propsFilename == null)
        {
            throw new CrosswalkInternalException("Configuration error: " +
                "No properties file configured for QDC crosswalk named \"" + myName + "\"");
        }

        String parent = ConfigurationManager.getProperty("dspace.dir") +
            File.separator + "config" + File.separator;
        File propsFile = new File(parent, propsFilename);
        Properties qdcProps = new Properties();
        FileInputStream pfs = null;
        try
        {
            pfs = new FileInputStream(propsFile);
            qdcProps.load(pfs);
        }
        finally
        {
            if (pfs != null)
            {
                try
                {
                    pfs.close();
                }
                catch (IOException ioe)
                {
                    // non-fatal: properties were already loaded; just note it
                    log.error("Error closing properties file " + propsFile + ": " + ioe);
                }
            }
        }

        // grovel properties to initialize qdc->element and element->qdc maps.
        // evaluate each XML fragment inside a wrapper that declares all
        // configured namespaces, so prefixes in the fragments resolve.
        String postlog = "</wrapper>";
        StringBuilder prologb = new StringBuilder("<wrapper");
        for (int i = 0; i < namespaces.length; ++i)
        {
            prologb.append(" xmlns:");
            prologb.append(namespaces[i].getPrefix());
            prologb.append("=\"");
            prologb.append(namespaces[i].getURI());
            prologb.append("\"");
        }
        prologb.append(">");
        String prolog = prologb.toString();
        pe = (Enumeration<String>)qdcProps.propertyNames();
        while (pe.hasMoreElements())
        {
            String qdc = pe.nextElement();
            String val = qdcProps.getProperty(qdc);
            try
            {
                Document d = builder.build(new StringReader(prolog+val+postlog));
                Element element = (Element)d.getRootElement().getContent(0);
                qdc2element.put(qdc, element);
                element2qdc.put(makeQualifiedTagName(element), qdc);
                log.debug("Building Maps: qdc=\""+qdc+"\", element=\""+element.toString()+"\"");
            }
            catch (org.jdom.JDOMException je)
            {
                throw new CrosswalkInternalException("Failed parsing XML fragment in properties file: \""+prolog+val+postlog+"\": "+je.toString(), je);
            }
        }
    }

    public Namespace[] getNamespaces()
    {
        try
        {
            init();
        }
        catch (Exception e)
        {
            // interface signature does not allow a checked exception here;
            // log instead of silently swallowing the configuration failure
            log.error("Failed to initialize QDC crosswalk", e);
        }
        return (Namespace[]) ArrayUtils.clone(namespaces);
    }

    public String getSchemaLocation()
    {
        try
        {
            init();
        }
        catch (Exception e)
        {
            // see getNamespaces(): log rather than swallow
            log.error("Failed to initialize QDC crosswalk", e);
        }
        return schemaLocation;
    }

    /**
     * Returns the object's metadata as a list of Qualified DC XML elements.
     */
    public List<Element> disseminateList(DSpaceObject dso)
        throws CrosswalkException,
               IOException, SQLException, AuthorizeException
    {
        return disseminateListInternal(dso, true);
    }

    /**
     * Shared dissemination: map each of the Item's metadata values through
     * the configured QDC prototype elements.
     *
     * @param addSchema if true, stamp each element with the xsi:schemaLocation
     */
    private List<Element> disseminateListInternal(DSpaceObject dso, boolean addSchema)
        throws CrosswalkException,
               IOException, SQLException, AuthorizeException
    {
        if (dso.getType() != Constants.ITEM)
        {
            throw new CrosswalkObjectNotSupported("QDCCrosswalk can only crosswalk an Item.");
        }
        Item item = (Item)dso;
        init();

        DCValue[] dc = item.getMetadata(Item.ANY, Item.ANY, Item.ANY, Item.ANY);
        List<Element> result = new ArrayList<Element>(dc.length);
        for (int i = 0; i < dc.length; i++)
        {
            // Compose qualified DC name - schema.element[.qualifier]
            // e.g. "dc.title", "dc.subject.lcc", "lom.Classification.Keyword"
            String qdc = dc[i].schema+"."+
                         ((dc[i].qualifier == null) ? dc[i].element
                            : (dc[i].element + "." + dc[i].qualifier));
            Element elt = qdc2element.get(qdc);

            // only complain about missing elements in the DC schema:
            if (elt == null)
            {
                if (dc[i].schema.equals(MetadataSchema.DC_SCHEMA))
                {
                    log.warn("WARNING: " + myName + ": No QDC mapping for \"" + qdc + "\"");
                }
            }
            else
            {
                // clone the prototype so the map entry stays pristine
                Element qe = (Element)elt.clone();
                qe.setText(dc[i].value);
                if (addSchema && schemaLocation != null)
                {
                    qe.setAttribute("schemaLocation", schemaLocation, XSI_NS);
                }
                if (dc[i].language != null)
                {
                    qe.setAttribute("lang", dc[i].language, Namespace.XML_NAMESPACE);
                }
                result.add(qe);
            }
        }
        return result;
    }

    /**
     * Returns the object's metadata wrapped in a single
     * &lt;qualifieddc&gt; root element.
     */
    public Element disseminateElement(DSpaceObject dso)
        throws CrosswalkException,
               IOException, SQLException, AuthorizeException
    {
        init();
        Element root = new Element("qualifieddc", DCTERMS_NS);
        if (schemaLocation != null)
        {
            root.setAttribute("schemaLocation", schemaLocation, XSI_NS);
        }
        root.addContent(disseminateListInternal(dso, false));
        return root;
    }

    public boolean canDisseminate(DSpaceObject dso)
    {
        return true;
    }

    public void ingest(Context context, DSpaceObject dso, Element root)
        throws CrosswalkException, IOException, SQLException, AuthorizeException
    {
        init();

        // NOTE: don't bother comparing namespace on root element
        // because DCMI doesn't specify one, and every app uses its
        // own.. just give up in the face of this madness and accept
        // anything with the right name.
        if (!(root.getName().equals("qualifieddc")))
        {
            throw new MetadataValidationException("Wrong root element for Qualified DC: " + root.toString());
        }
        ingest(context, dso, root.getChildren());
    }

    public void ingest(Context context, DSpaceObject dso, List<Element> ml)
        throws CrosswalkException, IOException, SQLException, AuthorizeException
    {
        init();

        // for now, forget about any targets but item.
        if (dso.getType() != Constants.ITEM)
        {
            throw new CrosswalkInternalException("Wrong target object type, QDCCrosswalk can only crosswalk to an Item.");
        }
        Item item = (Item)dso;

        for (Element me : ml)
        {
            String key = makeQualifiedTagName(me);

            // if the root element gets passed here, recurse:
            if ("qualifieddc".equals(me.getName()))
            {
                ingest(context, dso, me.getChildren());
            }
            else if (element2qdc.containsKey(key))
            {
                // map back to {schema}.{element}[.{qualifier}]
                String qdc[] = (element2qdc.get(key)).split("\\.");

                // get language - prefer xml:lang, accept lang.
                String lang = me.getAttributeValue("lang", Namespace.XML_NAMESPACE);
                if (lang == null)
                {
                    lang = me.getAttributeValue("lang");
                }

                if (qdc.length == 3)
                {
                    item.addMetadata(qdc[0], qdc[1], qdc[2], lang, me.getText());
                }
                else if (qdc.length == 2)
                {
                    item.addMetadata(qdc[0], qdc[1], null, lang, me.getText());
                }
                else
                {
                    throw new CrosswalkInternalException("Unrecognized format in QDC element identifier for key=\"" + key + "\", qdc=\"" + element2qdc.get(key) + "\"");
                }
            }
            else
            {
                log.warn("WARNING: " + myName + ": No mapping for Element=\"" + key + "\" to qdc.");
            }
        }
    }

    public boolean preferList()
    {
        return true;
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.content.crosswalk;
import java.io.IOException;
import java.sql.SQLException;
import java.util.List;
import org.dspace.authorize.AuthorizeException;
import org.dspace.content.DSpaceObject;
import org.dspace.core.Context;
import org.jdom.Element;
/**
* Ingestion Crosswalk plugin -- translate an external metadata format
* into DSpace native metadata.
* <p>
* This describes a plugin that translates an external XML
* metadata format (e.g. MODS) into the DSpace internal metadata
* representation. A crosswalk plugin may operate on different kinds of
* DSpace Objects, so the concept of "metadata" encompasses the
* qualified Dublin Core fields on Items, properties of Bitstreams, and
* metadata on Collections and Communities.
* <p>
*
* @author Larry Stone
* @version $Revision: 5844 $
*/
public interface IngestionCrosswalk
{
    /**
     * Crosswalk metadata from external XML representation to DSpace
     * internal representations. This version accepts metadata as a
     * <code>List</code> of JDOM XML elements. It interprets the
     * contents of each element and adds the appropriate values to the
     * DSpace Object's internal metadata representation.
     * <p>
     * Note that this method may be called several times for the same target
     * Item, if the metadata comes as several lists of elements, so it should
     * not add fixed metadata values on each or they may appear multiple times.
     * <p>
     * NOTE: <br>
     * Most XML metadata standards (e.g. MODS) are defined as a "root"
     * element which contains a sequence of "fields" that have the
     * descriptive information. Some metadata containers have a
     * "disembodied" list of fields, rather than the root element, so
     * this <code>ingest</code> method is intended to accept that bare
     * list of fields. However, it must also accept a list containing
     * only the "root" element for the metadata structure (e.g. the
     * "mods:mods" wrapper in a MODS expression) as a member of the
     * list. It can handle this case by calling the single-element
     * version of ingest() on the "root" element.
     * <p>
     * Some callers of the crosswalk plugin may not be careful about (or
     * capable of) choosing whether the list or element version should
     * be called.
     * <p>
     * @param context DSpace context.
     * @param dso DSpace Object (Item, Bitstream, etc) to which new metadata gets attached.
     * @param metadata List of XML Elements of metadata
     *
     * @throws CrosswalkInternalException (<code>CrosswalkException</code>) failure of the crosswalk itself.
     * @throws CrosswalkObjectNotSupported (<code>CrosswalkException</code>) Cannot crosswalk into this kind of DSpace object.
     * @throws MetadataValidationException (<code>CrosswalkException</code>) metadata format was not acceptable or missing required elements.
     * @throws IOException I/O failure in services this calls
     * @throws SQLException Database failure in services this calls
     * @throws AuthorizeException current user not authorized for this operation.
     */
    public void ingest(Context context, DSpaceObject dso, List<Element> metadata)
        throws CrosswalkException, IOException, SQLException, AuthorizeException;

    /**
     * Crosswalk metadata from external XML representation to DSpace
     * internal representations. This version accepts a single "root"
     * element of the XML metadata.
     * <p>
     * It is otherwise just like the <code>List</code> form of
     * <code>ingest()</code> above.
     * <p>
     * @param context DSpace context.
     * @param dso DSpace Object (usually an Item) to which new metadata gets attached.
     * @param root root Element of metadata document.
     *
     * @throws CrosswalkInternalException (<code>CrosswalkException</code>) failure of the crosswalk itself.
     * @throws CrosswalkObjectNotSupported (<code>CrosswalkException</code>) Cannot crosswalk into this kind of DSpace object.
     * @throws MetadataValidationException (<code>CrosswalkException</code>) metadata format was not acceptable or missing required elements.
     * @throws IOException I/O failure in services this calls
     * @throws SQLException Database failure in services this calls
     * @throws AuthorizeException current user not authorized for this operation.
     */
    public void ingest(Context context, DSpaceObject dso, Element root)
        throws CrosswalkException, IOException, SQLException, AuthorizeException;
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.content.crosswalk;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.sql.SQLException;

import org.apache.log4j.Logger;
import org.dspace.authorize.AuthorizeException;
import org.dspace.content.Bitstream;
import org.dspace.content.DSpaceObject;
import org.dspace.content.Item;
import org.dspace.core.Constants;
import org.dspace.core.Context;
import org.dspace.core.Utils;
import org.dspace.license.CreativeCommons;
/**
* Export the object's Creative Commons license, text form.
*
* @author Larry Stone
* @version $Revision: 1.0 $
*/
public class CreativeCommonsTextStreamDisseminationCrosswalk
    implements StreamDisseminationCrosswalk
{
    /** log4j logger */
    private static Logger log = Logger.getLogger(CreativeCommonsTextStreamDisseminationCrosswalk.class);

    /**
     * An object can be disseminated if it is an Item that carries a
     * Creative Commons license-text bitstream.
     * Best-effort probe: any failure is logged and treated as "no".
     */
    public boolean canDisseminate(Context context, DSpaceObject dso)
    {
        try
        {
            return dso.getType() == Constants.ITEM &&
                    CreativeCommons.getLicenseTextBitstream((Item)dso) != null;
        }
        catch (Exception e)
        {
            log.error("Failed getting CC license", e);
            return false;
        }
    }

    /**
     * Copy the Item's CC license text to the output stream. A non-Item, or
     * an Item without a CC license bitstream, produces no output.
     *
     * @param context DSpace context
     * @param dso the object whose license to disseminate
     * @param out destination stream; closed after a successful copy
     *            (historical behavior of this crosswalk, kept because
     *            callers may rely on it)
     */
    public void disseminate(Context context, DSpaceObject dso, OutputStream out)
        throws CrosswalkException, IOException, SQLException, AuthorizeException
    {
        if (dso.getType() == Constants.ITEM)
        {
            Bitstream cc = CreativeCommons.getLicenseTextBitstream((Item)dso);
            if (cc != null)
            {
                // Close the bitstream's input stream even if the copy fails;
                // it was previously leaked.
                InputStream licenseStream = cc.retrieve();
                try
                {
                    Utils.copy(licenseStream, out);
                }
                finally
                {
                    licenseStream.close();
                }
                out.close();
            }
        }
    }

    public String getMIMEType()
    {
        return "text/plain";
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.content.crosswalk;
import java.io.InputStream;
import java.io.IOException;
import java.sql.SQLException;
import org.dspace.authorize.AuthorizeException;
import org.dspace.content.DSpaceObject;
import org.dspace.core.Context;
/**
* A crosswalk to ignore and dispose of the ingested material.
* <p>
* Specify this crosswalk in the mapping of e.g. METS metadata field
* types to crosswalks when you wish to ignore a redundant or unknown
* type of metadata. For example, when ingesting a DSpace AIP with an
* AIP ingester, it is best to ignore the rightsMD fields since they
* are already going to be ingested as member bitstreams anyway.
*
* @author Larry Stone
* @version $Revision: 1.0 $
*/
public class NullStreamIngestionCrosswalk
    implements StreamIngestionCrosswalk
{
    /**
     * Discard the ingested metadata entirely. The stream is closed so the
     * underlying resource is released; the target object is left untouched.
     *
     * @param context DSpace context (unused)
     * @param dso target object (unused)
     * @param in metadata stream to ignore; always closed
     * @param MIMEType declared MIME type of the stream (unused)
     */
    public void ingest(Context context, DSpaceObject dso, InputStream in, String MIMEType)
        throws CrosswalkException, IOException, SQLException, AuthorizeException
    {
        in.close();
    }

    public String getMIMEType()
    {
        return "text/plain";
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.content.crosswalk;
import java.io.File;
import java.io.IOException;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.lang.ArrayUtils;
import org.dspace.authorize.AuthorizeException;
import org.dspace.content.DSpaceObject;
import org.dspace.content.Item;
import org.dspace.content.packager.PackageDisseminator;
import org.dspace.content.packager.PackageException;
import org.dspace.content.packager.PackageParameters;
import org.dspace.core.ConfigurationManager;
import org.dspace.core.Constants;
import org.dspace.core.Context;
import org.dspace.core.PluginManager;
import org.jdom.Document;
import org.jdom.Element;
import org.jdom.JDOMException;
import org.jdom.Namespace;
import org.jdom.input.SAXBuilder;
/**
* METS dissemination crosswalk
* <p>
* Produces a METS manifest for the DSpace item as a metadata
* description -- intended to work within an application like the
* OAI server.
*
* @author Larry Stone
* @version $Revision: 5844 $
*/
public class METSDisseminationCrosswalk
    implements DisseminationCrosswalk
{
    /**
     * Plugin Name of METS packager to use for manifest;
     * maybe make this configurable.
     */
    private static final String METS_PACKAGER_PLUGIN = "METS";

    /**
     * MODS namespace.
     */
    public static final Namespace MODS_NS =
        Namespace.getNamespace("mods", "http://www.loc.gov/mods/v3");

    private static final Namespace XLINK_NS =
        Namespace.getNamespace("xlink", "http://www.w3.org/TR/xlink");

    /** METS namespace -- includes "mets" prefix for use in XPaths */
    private static final Namespace METS_NS = Namespace
            .getNamespace("mets", "http://www.loc.gov/METS/");

    private static final Namespace namespaces[] = { METS_NS, MODS_NS, XLINK_NS };

    /** URL of METS XML Schema */
    private static final String METS_XSD = "http://www.loc.gov/standards/mets/mets.xsd";

    private static final String schemaLocation =
        METS_NS.getURI()+" "+METS_XSD;

    public Namespace[] getNamespaces()
    {
        return (Namespace[]) ArrayUtils.clone(namespaces);
    }

    public String getSchemaLocation()
    {
        return schemaLocation;
    }

    /** Disseminate as a one-element list holding the METS manifest. */
    public List<Element> disseminateList(DSpaceObject dso)
        throws CrosswalkException,
               IOException, SQLException, AuthorizeException
    {
        List<Element> result = new ArrayList<Element>(1);
        result.add(disseminateElement(dso));
        return result;
    }

    /**
     * Build a METS manifest for the Item by delegating to the METS package
     * disseminator (manifestOnly mode) via a temporary file, then parse the
     * result back into a JDOM tree.
     *
     * @param dso must be an Item
     * @return root element of the METS document
     * @throws CrosswalkObjectNotSupported if dso is not an Item
     * @throws CrosswalkInternalException if no METS packager plugin is found
     *         or the packager fails
     * @throws MetadataValidationException if the produced METS cannot be parsed
     */
    public Element disseminateElement(DSpaceObject dso)
        throws CrosswalkException,
               IOException, SQLException, AuthorizeException
    {
        if (dso.getType() != Constants.ITEM)
        {
            throw new CrosswalkObjectNotSupported("METSDisseminationCrosswalk can only crosswalk an Item.");
        }
        Item item = (Item)dso;

        PackageDisseminator dip = (PackageDisseminator)
            PluginManager.getNamedPlugin(PackageDisseminator.class, METS_PACKAGER_PLUGIN);
        if (dip == null)
        {
            throw new CrosswalkInternalException("Cannot find a disseminate plugin for package=" + METS_PACKAGER_PLUGIN);
        }

        try
        {
            // Set the manifestOnly=true param so we just get METS document
            PackageParameters pparams = new PackageParameters();
            pparams.put("manifestOnly", "true");

            // Create a temporary file to disseminate into
            String tempDirectory = ConfigurationManager.getProperty("upload.temp.dir");
            File tempFile = File.createTempFile("METSDissemination" + item.hashCode(), null, new File(tempDirectory));
            tempFile.deleteOnExit();

            try
            {
                // Disseminate METS to temp file. Use a short-lived Context
                // that is always released: leaving it open leaked a DB
                // connection on every call. The use here is read-only, so
                // abort() (discard without commit) is appropriate.
                Context context = new Context();
                try
                {
                    dip.disseminate(context, item, pparams, tempFile);
                }
                finally
                {
                    context.abort();
                }

                try
                {
                    SAXBuilder builder = new SAXBuilder();
                    Document metsDocument = builder.build(tempFile);
                    return metsDocument.getRootElement();
                }
                catch (JDOMException je)
                {
                    throw new MetadataValidationException("Error parsing METS (see wrapped error message for more details) ",je);
                }
            }
            finally
            {
                // deleteOnExit() only cleans up at JVM shutdown; delete
                // eagerly so temp files don't accumulate in long-running JVMs
                tempFile.delete();
            }
        }
        catch (PackageException pe)
        {
            throw new CrosswalkInternalException("Failed making METS manifest in packager (see wrapped error message for more details) ",pe);
        }
    }

    public boolean canDisseminate(DSpaceObject dso)
    {
        return true;
    }

    public boolean preferList()
    {
        return false;
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.content.crosswalk;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.net.ConnectException;
import java.net.URL;
import java.sql.SQLException;
import java.text.NumberFormat;
import java.util.Arrays;
import java.util.Date;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.log4j.Logger;
import org.dspace.authorize.AuthorizeException;
import org.dspace.content.Bitstream;
import org.dspace.content.BitstreamFormat;
import org.dspace.content.Bundle;
import org.dspace.content.DSpaceObject;
import org.dspace.content.FormatIdentifier;
import org.dspace.content.Item;
import org.dspace.core.Constants;
import org.dspace.core.Context;
import org.jdom.Attribute;
import org.jdom.Document;
import org.jdom.Element;
import org.jdom.JDOMException;
import org.jdom.Namespace;
import org.jdom.xpath.XPath;
/**
* ORE ingestion crosswalk
* <p>
 * Processes an Atom-encoded ORE resource map and attempts to interpret it as a DSpace item
*
* @author Alexey Maslov
* @version $Revision: 1 $
*/
public class OREIngestionCrosswalk
implements IngestionCrosswalk
{
/** log4j category */
private static Logger log = Logger.getLogger(OREDisseminationCrosswalk.class);
/* Namespaces */
public static final Namespace ATOM_NS =
Namespace.getNamespace("atom", "http://www.w3.org/2005/Atom");
private static final Namespace ORE_ATOM =
Namespace.getNamespace("oreatom", "http://www.openarchives.org/ore/atom/");
private static final Namespace ORE_NS =
Namespace.getNamespace("ore", "http://www.openarchives.org/ore/terms/");
private static final Namespace RDF_NS =
Namespace.getNamespace("rdf", "http://www.w3.org/1999/02/22-rdf-syntax-ns#");
private static final Namespace DCTERMS_NS =
Namespace.getNamespace("dcterms", "http://purl.org/dc/terms/");
private static final Namespace DS_NS =
Namespace.getNamespace("ds","http://www.dspace.org/objectModel/");
public void ingest(Context context, DSpaceObject dso, List<Element> metadata) throws CrosswalkException, IOException, SQLException, AuthorizeException {
// If this list contains only the root already, just pass it on
if (metadata.size() == 1) {
ingest(context, dso, metadata.get(0));
}
// Otherwise, wrap them up
else {
Element wrapper = new Element("wrap", metadata.get(0).getNamespace());
wrapper.addContent(metadata);
ingest(context,dso,wrapper);
}
}
	/**
	 * Ingest a single Atom-encoded ORE resource map into an Item: for each
	 * aggregated resource link, fetch the resource over HTTP and store it as
	 * a bitstream in the bundle named by the resource's rdf:Description
	 * (falling back to "ORIGINAL").
	 *
	 * @param context DSpace context, used for bitstream-format lookup
	 * @param dso must be an Item; anything else raises CrosswalkObjectNotSupported
	 * @param root the atom:entry root element of the resource map; a null
	 *             root is reported to stderr and silently ignored
	 */
	public void ingest(Context context, DSpaceObject dso, Element root) throws CrosswalkException, IOException, SQLException, AuthorizeException {
		Date timeStart = new Date();

		if (dso.getType() != Constants.ITEM)
		{
			throw new CrosswalkObjectNotSupported("OREIngestionCrosswalk can only crosswalk an Item.");
		}
		Item item = (Item)dso;

		if (root == null) {
			// NOTE(review): silent no-op on null input; stderr rather than log
			System.err.println("The element received by ingest was null");
			return;
		}

		// Detach the root into a fresh Document so XPath queries anchored at
		// "/atom:entry" resolve against it.
		Document doc = new Document();
		doc.addContent(root.detach());

		XPath xpathLinks;
		List<Element> aggregatedResources;
		String entryId;
		try {
			// All <atom:link rel="...aggregates"> elements: the aggregated resources
			xpathLinks = XPath.newInstance("/atom:entry/atom:link[@rel=\"" + ORE_NS.getURI()+"aggregates" + "\"]");
			xpathLinks.addNamespace(ATOM_NS);
			aggregatedResources = xpathLinks.selectNodes(doc);

			// The entry's alternate link href, used only in error messages
			xpathLinks = XPath.newInstance("/atom:entry/atom:link[@rel='alternate']/@href");
			xpathLinks.addNamespace(ATOM_NS);
			entryId = ((Attribute)xpathLinks.selectSingleNode(doc)).getValue();
		} catch (JDOMException e) {
			throw new CrosswalkException("JDOM exception occured while ingesting the ORE", e);
		}

		// Next for each resource, create a bitstream
		XPath xpathDesc;
		// nf is configured but not used below -- presumably leftover; verify
		NumberFormat nf=NumberFormat.getInstance();
		nf.setGroupingUsed(false);
		nf.setMinimumIntegerDigits(4);

		for (Element resource : aggregatedResources)
		{
			String href = resource.getAttributeValue("href");
			log.debug("ORE processing: " + href);

			String bundleName;
			Element desc = null;
			try {
				// Find the rdf:Description whose rdf:about matches this resource
				xpathDesc = XPath.newInstance("/atom:entry/oreatom:triples/rdf:Description[@rdf:about=\"" + this.encodeForURL(href) + "\"][1]");
				xpathDesc.addNamespace(ATOM_NS);
				xpathDesc.addNamespace(ORE_ATOM);
				xpathDesc.addNamespace(RDF_NS);
				desc = (Element)xpathDesc.selectSingleNode(doc);
			} catch (JDOMException e) {
				// best-effort: a failed lookup just falls through to the default bundle
				e.printStackTrace();
			}

			// NOTE(review): getChild("type", RDF_NS) will NPE if the
			// description has no rdf:type child -- confirm inputs always carry one
			if (desc != null && desc.getChild("type", RDF_NS).getAttributeValue("resource", RDF_NS).equals(DS_NS.getURI() + "DSpaceBitstream"))
			{
				// dcterms:description of a DSpaceBitstream carries the bundle name
				bundleName = desc.getChildText("description", DCTERMS_NS);
				log.debug("Setting bundle name to: " + bundleName);
			}
			else {
				log.info("Could not obtain bundle name; using 'ORIGINAL'");
				bundleName = "ORIGINAL";
			}

			// Bundle names are not unique, so we just pick the first one if there's more than one.
			Bundle[] targetBundles = item.getBundles(bundleName);
			Bundle targetBundle;

			// if null, create the new bundle and add it in
			if (targetBundles.length == 0) {
				targetBundle = item.createBundle(bundleName);
				item.addBundle(targetBundle);
			}
			else {
				targetBundle = targetBundles[0];
			}

			URL ARurl = null;
			InputStream in = null;
			if (href != null) {
				try {
					// Make sure the url string escapes all the oddball characters
					String processedURL = encodeForURL(href);
					// Generate a request for the aggregated resource
					ARurl = new URL(processedURL);
					in = ARurl.openStream();
				}
				catch(FileNotFoundException fe) {
					log.error("The provided URI failed to return a resource: " + href);
				}
				catch(ConnectException fe) {
					log.error("The provided URI was invalid: " + href);
				}
			}
			else {
				throw new CrosswalkException("Entry did not contain link to resource: " + entryId);
			}

			// ingest and update
			if (in != null) {
				Bitstream newBitstream = targetBundle.createBitstream(in);

				String bsName = resource.getAttributeValue("title");
				newBitstream.setName(bsName);

				// Identify the format
				String mimeString = resource.getAttributeValue("type");
				BitstreamFormat bsFormat = BitstreamFormat.findByMIMEType(context, mimeString);
				if (bsFormat == null) {
					bsFormat = FormatIdentifier.guessFormat(context, newBitstream);
				}
				newBitstream.setFormat(bsFormat);
				newBitstream.update();

				targetBundle.addBitstream(newBitstream);
				targetBundle.update();
			}
			else {
				// the fetch above failed (logged); without content we cannot continue
				throw new CrosswalkException("Could not retrieve bitstream: " + entryId);
			}
		}
		log.info("OREIngest for Item "+ item.getID() + " took: " + (new Date().getTime() - timeStart.getTime()) + "ms.");
	}
/**
* Helper method to escape all chaacters that are not part of the canon set
* @param sourceString source unescaped string
*/
private String encodeForURL(String sourceString) {
Character lowalpha[] = {'a' , 'b' , 'c' , 'd' , 'e' , 'f' , 'g' , 'h' , 'i' ,
'j' , 'k' , 'l' , 'm' , 'n' , 'o' , 'p' , 'q' , 'r' ,
's' , 't' , 'u' , 'v' , 'w' , 'x' , 'y' , 'z'};
Character upalpha[] = {'A' , 'B' , 'C' , 'D' , 'E' , 'F' , 'G' , 'H' , 'I' ,
'J' , 'K' , 'L' , 'M' , 'N' , 'O' , 'P' , 'Q' , 'R' ,
'S' , 'T' , 'U' , 'V' , 'W' , 'X' , 'Y' , 'Z'};
Character digit[] = {'0' , '1' , '2' , '3' , '4' , '5' , '6' , '7' , '8' , '9'};
Character mark[] = {'-' , '_' , '.' , '!' , '~' , '*' , '\'' , '(' , ')'};
// reserved
Character reserved[] = {';' , '/' , '?' , ':' , '@' , '&' , '=' , '+' , '$' , ',' ,'%', '#'};
Set<Character> URLcharsSet = new HashSet<Character>();
URLcharsSet.addAll(Arrays.asList(lowalpha));
URLcharsSet.addAll(Arrays.asList(upalpha));
URLcharsSet.addAll(Arrays.asList(digit));
URLcharsSet.addAll(Arrays.asList(mark));
URLcharsSet.addAll(Arrays.asList(reserved));
StringBuilder processedString = new StringBuilder();
for (int i=0; i<sourceString.length(); i++) {
char ch = sourceString.charAt(i);
if (URLcharsSet.contains(ch)) {
processedString.append(ch);
}
else {
processedString.append("%").append(Integer.toHexString((int)ch));
}
}
return processedString.toString();
}
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.content.crosswalk;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.sql.SQLException;
import java.util.List;
import org.dspace.authorize.AuthorizeException;
import org.dspace.content.DSpaceObject;
import org.dspace.content.packager.PackageDisseminator;
import org.dspace.content.packager.PackageException;
import org.dspace.content.packager.PackageIngester;
import org.dspace.content.packager.PackageParameters;
import org.dspace.content.packager.RoleDisseminator;
import org.dspace.core.ConfigurationManager;
import org.dspace.core.Constants;
import org.dspace.core.Context;
import org.dspace.core.PluginManager;
import org.jdom.Document;
import org.jdom.Element;
import org.jdom.JDOMException;
import org.jdom.Namespace;
import org.jdom.input.SAXBuilder;
import org.jdom.output.XMLOutputter;
/**
* Role Crosswalk
* <p>
* Translate between DSpace Group & EPeople definitions and a DSpace-specific
* XML export format (generated by the RoleDisseminator). This is primarily
* used for AIPs, but may be used by other Packagers as necessary.
* <p>
* This crosswalk allows you to export DSpace Groups & EPeople to this XML
* structured format. It also allows you to import an XML file of this format
* in order to restore DSpace Groups and EPeople defined within it.
* <p>
* This is just wrappers; the real work is done in RoleDisseminator and
* RoleIngester.
*
* @author mwood
* @author Tim Donohue
* @see org.dspace.content.packager.RoleDisseminator
* @see org.dspace.content.packager.RoleIngester
* @see AbstractPackagerWrappingCrosswalk
* @see IngestionCrosswalk
* @see DisseminationCrosswalk
*/
public class RoleCrosswalk
extends AbstractPackagerWrappingCrosswalk
implements IngestionCrosswalk, DisseminationCrosswalk
{
// Plugin Name of DSPACE-ROLES packager to use for ingest/dissemination
// (Whatever plugin is defined with this name in 'dspace.cfg' will be used by this Crosswalk)
private static final String ROLE_PACKAGER_PLUGIN = "DSPACE-ROLES";
// ---- Dissemination Methods -----------
/**
* Get XML namespaces of the elements this crosswalk may return.
* Returns the XML namespaces (as JDOM objects) of the root element.
*
* @return array of namespaces, which may be empty.
*/
@Override
public Namespace[] getNamespaces()
{
Namespace result[] = new Namespace[1];
result[0] = RoleDisseminator.DSROLES_NS;
return result;
}
/**
* Get the XML Schema location(s) of the target metadata format.
* Returns the string value of the <code>xsi:schemaLocation</code>
* attribute that should be applied to the generated XML.
* <p>
* It may return the empty string if no schema is known, but crosswalk
* authors are strongly encouraged to implement this call so their output
* XML can be validated correctly.
* @return SchemaLocation string, including URI namespace, followed by
* whitespace and URI of XML schema document, or empty string if unknown.
*/
@Override
public String getSchemaLocation()
{
return "";
}
/**
* Predicate: Can this disseminator crosswalk the given object.
*
* @param dso dspace object, e.g. an <code>Item</code>.
* @return true when disseminator is capable of producing metadata.
*/
@Override
public boolean canDisseminate(DSpaceObject dso)
{
//We can only disseminate SITE, COMMUNITY or COLLECTION objects,
//as Groups are only associated with those objects.
return (dso.getType() == Constants.SITE ||
dso.getType() == Constants.COMMUNITY ||
dso.getType() == Constants.COLLECTION);
}
/**
* Predicate: Does this disseminator prefer to return a list of Elements,
* rather than a single root Element?
*
* @return true when disseminator prefers you call disseminateList().
*/
@Override
public boolean preferList()
{
//We prefer disseminators call 'disseminateElement()' instead of 'disseminateList()'
return false;
}
/**
* Execute crosswalk, returning List of XML elements.
* Returns a <code>List</code> of JDOM <code>Element</code> objects representing
* the XML produced by the crosswalk. This is typically called when
* a list of fields is desired, e.g. for embedding in a METS document
* <code>xmlData</code> field.
* <p>
* When there are no results, an
* empty list is returned, but never <code>null</code>.
*
* @param dso the DSpace Object whose metadata to export.
* @return results of crosswalk as list of XML elements.
*
* @throws CrosswalkInternalException (<code>CrosswalkException</code>) failure of the crosswalk itself.
* @throws CrosswalkObjectNotSupported (<code>CrosswalkException</code>) Cannot crosswalk this kind of DSpace object.
* @throws IOException I/O failure in services this calls
* @throws SQLException Database failure in services this calls
* @throws AuthorizeException current user not authorized for this operation.
*/
@Override
public List<Element> disseminateList(DSpaceObject dso)
throws CrosswalkException, IOException, SQLException,
AuthorizeException
{
Element dim = disseminateElement(dso);
return dim.getChildren();
}
/**
* Execute crosswalk, returning one XML root element as
* a JDOM <code>Element</code> object.
* This is typically the root element of a document.
* <p>
*
* @param dso the DSpace Object whose metadata to export.
* @return root Element of the target metadata, never <code>null</code>
*
* @throws CrosswalkInternalException (<code>CrosswalkException</code>) failure of the crosswalk itself.
* @throws CrosswalkObjectNotSupported (<code>CrosswalkException</code>) Cannot crosswalk this kind of DSpace object.
* @throws IOException I/O failure in services this calls
* @throws SQLException Database failure in services this calls
* @throws AuthorizeException current user not authorized for this operation.
*/
@Override
public Element disseminateElement(DSpaceObject dso)
throws CrosswalkException, IOException, SQLException,
AuthorizeException
{
try
{
PackageDisseminator dip = (PackageDisseminator)
PluginManager.getNamedPlugin(PackageDisseminator.class, ROLE_PACKAGER_PLUGIN);
if (dip == null)
{
throw new CrosswalkInternalException("Cannot find a PackageDisseminator plugin named " + ROLE_PACKAGER_PLUGIN);
}
// Create a temporary file to disseminate into
String tempDirectory = ConfigurationManager.getProperty("upload.temp.dir");
File tempFile = File.createTempFile("RoleCrosswalkDisseminate" + dso.hashCode(), null, new File(tempDirectory));
tempFile.deleteOnExit();
// Initialize our packaging parameters
PackageParameters pparams;
if(this.getPackagingParameters()!=null)
{
pparams = this.getPackagingParameters();
}
else
{
pparams = new PackageParameters();
}
//actually disseminate to our temp file.
Context context = new Context();
dip.disseminate(context, dso, pparams, tempFile);
// if we ended up with a Zero-length output file,
// this means dissemination was successful but had no results
if(tempFile.exists() && tempFile.length()==0)
{
return null;
}
try
{
//Try to parse our XML results (which were disseminated by the Packager)
SAXBuilder builder = new SAXBuilder();
Document xmlDocument = builder.build(tempFile);
//If XML parsed successfully, return root element of doc
if(xmlDocument!=null && xmlDocument.hasRootElement())
{
return xmlDocument.getRootElement();
}
else
{
return null;
}
}
catch (JDOMException je)
{
throw new MetadataValidationException("Error parsing Roles XML (see wrapped error message for more details) ",je);
}
}
catch (PackageException pe)
{
throw new CrosswalkInternalException("Failed to export Roles via packager (see wrapped error message for more details) ",pe);
}
}
// ---- Ingestion Methods -----------
/**
* Ingest a List of XML elements
*
* @param context
* @param dso
* @param metadata
* @throws CrosswalkException
* @throws IOException
* @throws SQLException
* @throws AuthorizeException
*/
@Override
public void ingest(Context context, DSpaceObject dso, List<Element> metadata)
throws CrosswalkException, IOException, SQLException, AuthorizeException
{
if(!metadata.isEmpty())
{
ingest(context, dso, ((Element) metadata.get(0)).getParentElement());
}
}
/**
* Ingest a whole XML document, starting at specified root.
* <P>
* This essentially just wraps a call to the configured Role PackageIngester.
*
* @param context
* @param dso
* @param root
* @throws CrosswalkException
* @throws IOException
* @throws SQLException
* @throws AuthorizeException
*/
@Override
public void ingest(Context context, DSpaceObject dso, Element root)
throws CrosswalkException, IOException, SQLException, AuthorizeException
{
if (dso.getType() != Constants.SITE &&
dso.getType() != Constants.COMMUNITY &&
dso.getType() != Constants.COLLECTION)
{
throw new CrosswalkObjectNotSupported("Role crosswalk only valid for Site, Community or Collection");
}
//locate our "DSPACE-ROLES" PackageIngester plugin
PackageIngester sip = (PackageIngester)
PluginManager.getNamedPlugin(PackageIngester.class, ROLE_PACKAGER_PLUGIN);
if (sip == null)
{
throw new CrosswalkInternalException("Cannot find a PackageIngester plugin named " + ROLE_PACKAGER_PLUGIN);
}
// Initialize our packaging parameters
PackageParameters pparams;
if(this.getPackagingParameters()!=null)
{
pparams = this.getPackagingParameters();
}
else
{
pparams = new PackageParameters();
}
// Initialize our license info
String license = null;
if(this.getIngestionLicense()!=null)
{
license = this.getIngestionLicense();
}
// Create a temporary file to ingest from
String tempDirectory = ConfigurationManager.getProperty("upload.temp.dir");
File tempFile = File.createTempFile("RoleCrosswalkIngest" + dso.hashCode(), null, new File(tempDirectory));
tempFile.deleteOnExit();
FileOutputStream fileOutStream = null;
try
{
fileOutStream = new FileOutputStream(tempFile);
XMLOutputter writer = new XMLOutputter();
writer.output(root, fileOutStream);
}
finally
{
if (fileOutStream != null)
{
fileOutStream.close();
}
}
//Actually call the ingester
try
{
sip.ingest(context, dso, tempFile, pparams, license);
}
catch (PackageException e)
{
throw new CrosswalkInternalException(e);
}
}
} | Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.content.crosswalk;
import java.io.InputStream;
import java.io.IOException;
import java.sql.SQLException;
import org.apache.log4j.Logger;
import org.dspace.authorize.AuthorizeException;
import org.dspace.content.DSpaceObject;
import org.dspace.content.Item;
import org.dspace.core.Constants;
import org.dspace.core.Context;
import org.dspace.license.CreativeCommons;
/**
* Ingest a Creative Commons license, RDF form.
* <p>
* Note that this is NOT needed when ingesting a DSpace AIP, since the
* CC license is stored as a Bitstream (or two) in a dedicated Bundle;
 * the normal apparatus of ingesting the AIP will restore that Bitstream
* with its proper name and thus the presence of the CC license.
* <p>
* This crosswalk should only be used when ingesting other kinds of SIPs.
*
* @author Larry Stone
* @version $Revision: 1.0 $
*/
public class CreativeCommonsRDFStreamIngestionCrosswalk
    implements StreamIngestionCrosswalk
{
    /** log4j logger */
    private static Logger log = Logger.getLogger(CreativeCommonsRDFStreamIngestionCrosswalk.class);

    /**
     * Attach the RDF form of a Creative Commons license to an Item.
     * Non-Item objects are silently ignored, since only Items carry licenses.
     *
     * @param context the DSpace context
     * @param dso the target object; only Items are acted upon
     * @param in stream delivering the license RDF
     * @param MIMEType the MIME type of the incoming stream
     */
    public void ingest(Context context, DSpaceObject dso, InputStream in, String MIMEType)
        throws CrosswalkException, IOException, SQLException, AuthorizeException
    {
        // Anything that is not an Item cannot hold a CC license -- nothing to do.
        if (dso.getType() != Constants.ITEM)
        {
            return;
        }

        if (log.isDebugEnabled())
        {
            log.debug("Reading a Creative Commons license, MIMEtype=" + MIMEType);
        }
        CreativeCommons.setLicense(context, (Item) dso, in, MIMEType);
    }

    /**
     * @return the MIME type of the metadata stream this crosswalk consumes
     */
    public String getMIMEType()
    {
        return "text/rdf";
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.content;
import java.io.IOException;
import java.sql.SQLException;
import org.dspace.authorize.AuthorizeException;
import org.dspace.eperson.EPerson;
/**
* Interface for manipulating in-progress submissions, without having to know at
* which stage of submission they are (in workspace or workflow system)
*
* @author Robert Tansley
* @version $Revision: 5844 $
*/
public interface InProgressSubmission
{
    /**
     * Get the internal ID of this submission
     *
     * @return the internal identifier
     */
    int getID();

    /**
     * Deletes submission wrapper, doesn't delete item contents
     *
     * @throws SQLException if a database error occurs
     * @throws IOException if an I/O error occurs while removing associated data
     * @throws AuthorizeException if the current user is not permitted to delete
     */
    void deleteWrapper() throws SQLException, IOException, AuthorizeException;

    /**
     * Update the submission, including the unarchived item.
     *
     * @throws SQLException if a database error occurs
     * @throws IOException if an I/O error occurs during the update
     * @throws AuthorizeException if the current user is not permitted to update
     */
    void update() throws SQLException, IOException, AuthorizeException;

    /**
     * Get the incomplete item object
     *
     * @return the item
     */
    Item getItem();

    /**
     * Get the collection being submitted to
     *
     * @return the collection
     */
    Collection getCollection();

    /**
     * Get the submitter
     *
     * @return the submitting e-person
     * @throws SQLException if a database error occurs
     */
    EPerson getSubmitter() throws SQLException;

    /**
     * Find out if the submission has (or is intended to have) more than one
     * associated bitstream.
     *
     * @return <code>true</code> if there is to be more than one file.
     */
    boolean hasMultipleFiles();

    /**
     * Indicate whether the submission is intended to have more than one file.
     *
     * @param b
     *            if <code>true</code>, submission may have more than one
     *            file.
     */
    void setMultipleFiles(boolean b);

    /**
     * Find out if the submission has (or is intended to have) more than one
     * title.
     *
     * @return <code>true</code> if there is to be more than one title.
     */
    boolean hasMultipleTitles();

    /**
     * Indicate whether the submission is intended to have more than one title.
     *
     * @param b
     *            if <code>true</code>, submission may have more than one
     *            title.
     */
    void setMultipleTitles(boolean b);

    /**
     * Find out if the submission has been published or publicly distributed
     * before
     *
     * @return <code>true</code> if it has been published before
     */
    boolean isPublishedBefore();

    /**
     * Indicate whether the submission has been published or publicly
     * distributed before
     *
     * @param b
     *            <code>true</code> if it has been published before
     */
    void setPublishedBefore(boolean b);
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.checker;
import java.io.FileNotFoundException;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Date;
import java.util.List;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.cli.PosixParser;
import org.apache.log4j.Logger;
import org.dspace.checker.BitstreamDispatcher;
import org.dspace.checker.BitstreamInfoDAO;
import org.dspace.checker.CheckerCommand;
import org.dspace.checker.HandleDispatcher;
import org.dspace.checker.LimitedCountDispatcher;
import org.dspace.checker.LimitedDurationDispatcher;
import org.dspace.checker.ListDispatcher;
import org.dspace.checker.ResultsLogger;
import org.dspace.checker.ResultsPruner;
import org.dspace.checker.SimpleDispatcher;
import org.dspace.core.Utils;
/**
 * Command line access to the checksum checker. Options are listed in the
 * documentation for the main method.
*
* @author Jim Downing
* @author Grace Carpenter
* @author Nathan Sarr
*/
public final class ChecksumChecker
{
    private static final Logger LOG = Logger.getLogger(ChecksumChecker.class);

    /**
     * Blanked off constructor, this class should be used as a command line
     * tool.
     */
    private ChecksumChecker()
    {
    }

    /**
     * Command line access to the checksum package.
     *
     * @param args
     *            <dl>
     *            <dt>-h</dt>
     *            <dd>Print help on command line options</dd>
     *            <dt>-l</dt>
     *            <dd>loop through bitstreams once</dd>
     *            <dt>-L</dt>
     *            <dd>loop continuously through bitstreams</dd>
     *            <dt>-d</dt>
     *            <dd>specify duration of process run</dd>
     *            <dt>-b</dt>
     *            <dd>specify bitstream IDs</dd>
     *            <dt>-a [handle_id]</dt>
     *            <dd>check anything by handle</dd>
     *            <dt>-v</dt>
     *            <dd>report all processing, not only errors</dd>
     *            <dt>-p</dt>
     *            <dd>prune old results before running checker (optional properties file)</dd>
     *            </dl>
     */
    public static void main(String[] args)
    {
        // set up command line parser
        CommandLineParser parser = new PosixParser();
        CommandLine line = null;

        // create an options object and populate it
        Options options = new Options();

        options.addOption("l", "looping", false, "Loop once through bitstreams");
        options.addOption("L", "continuous", false,
                "Loop continuously through bitstreams");
        options.addOption("h", "help", false, "Help");
        options.addOption("d", "duration", true, "Checking duration");
        options.addOption("c", "count", true, "Check count");
        options.addOption("a", "handle", true, "Specify a handle to check");
        options.addOption("v", "verbose", false, "Report all processing");

        // -b takes a space-separated list of bitstream ids
        options.addOption(OptionBuilder
                .withArgName("bitstream-ids")
                .hasArgs()
                .withDescription("Space separated list of bitstream ids")
                .create('b'));

        // -p optionally takes a properties file for prune configuration.
        // NOTE: "p" used to be registered twice (once as a plain flag), which
        // shadowed this definition; the duplicate registration was removed.
        options.addOption(OptionBuilder
                .withArgName("prune")
                .hasOptionalArgs(1)
                .withDescription(
                        "Prune old results (optionally using specified properties file for configuration)")
                .create('p'));

        try
        {
            line = parser.parse(options, args);
        }
        catch (ParseException e)
        {
            LOG.fatal(e);
            System.exit(1);
        }

        // user asks for help
        if (line.hasOption('h'))
        {
            printHelp(options);
        }

        // Prune stage
        if (line.hasOption('p'))
        {
            ResultsPruner rp = null;
            try
            {
                // use the supplied properties file when given, else the default config
                rp = (line.getOptionValue('p') != null) ? ResultsPruner
                        .getPruner(line.getOptionValue('p')) : ResultsPruner
                        .getDefaultPruner();
            }
            catch (FileNotFoundException e)
            {
                LOG.error("File not found", e);
                System.exit(1);
            }
            int count = rp.prune();
            System.out.println("Pruned " + count
                    + " old results from the database.");
        }

        Date processStart = Calendar.getInstance().getTime();

        BitstreamDispatcher dispatcher = null;

        // process should loop infinitely through
        // most_recent_checksum table
        if (line.hasOption('l'))
        {
            dispatcher = new SimpleDispatcher(new BitstreamInfoDAO(), processStart, false);
        }
        else if (line.hasOption('L'))
        {
            dispatcher = new SimpleDispatcher(new BitstreamInfoDAO(), processStart, true);
        }
        else if (line.hasOption('b'))
        {
            // check only specified bitstream(s)
            String[] ids = line.getOptionValues('b');
            List<Integer> idList = new ArrayList<Integer>(ids.length);

            for (int i = 0; i < ids.length; i++)
            {
                try
                {
                    idList.add(Integer.valueOf(ids[i]));
                }
                catch (NumberFormatException nfe)
                {
                    System.err.println("The following argument: " + ids[i]
                            + " is not an integer");
                    System.exit(0);
                }
            }
            dispatcher = new ListDispatcher(idList);
        }
        else if (line.hasOption('a'))
        {
            dispatcher = new HandleDispatcher(new BitstreamInfoDAO(), line.getOptionValue('a'));
        }
        else if (line.hasOption('d'))
        {
            // run checker process for specified duration
            try
            {
                dispatcher = new LimitedDurationDispatcher(
                        new SimpleDispatcher(new BitstreamInfoDAO(), processStart, true), new Date(
                                System.currentTimeMillis()
                                        + Utils.parseDuration(line
                                                .getOptionValue('d'))));
            }
            catch (Exception e)
            {
                LOG.fatal("Couldn't parse " + line.getOptionValue('d')
                        + " as a duration: ", e);
                System.exit(0);
            }
        }
        else if (line.hasOption('c'))
        {
            // run checker process for specified number of bitstreams
            int count = Integer.parseInt(line.getOptionValue('c'));
            dispatcher = new LimitedCountDispatcher(new SimpleDispatcher(
                    new BitstreamInfoDAO(), processStart, false), count);
        }
        else
        {
            // default: check a single bitstream (equivalent to -c 1)
            dispatcher = new LimitedCountDispatcher(new SimpleDispatcher(
                    new BitstreamInfoDAO(), processStart, false), 1);
        }

        ResultsLogger logger = new ResultsLogger(processStart);
        CheckerCommand checker = new CheckerCommand();

        // verbose reporting
        if (line.hasOption('v'))
        {
            checker.setReportVerbose(true);
        }

        checker.setProcessStartDate(processStart);
        checker.setDispatcher(dispatcher);
        checker.setCollector(logger);
        checker.process();
        System.exit(0);
    }

    /**
     * Print the help options for the user
     *
     * @param options that are available for the user
     */
    private static void printHelp(Options options)
    {
        HelpFormatter myhelp = new HelpFormatter();
        myhelp.printHelp("Checksum Checker\n", options);
        System.out
                .println("\nSpecify a duration for checker process, using s(seconds),"
                        + "m(minutes), or h(hours): ChecksumChecker -d 30s"
                        + " OR ChecksumChecker -d 30m"
                        + " OR ChecksumChecker -d 2h");
        System.out
                .println("\nSpecify bitstream IDs: ChecksumChecker -b 13 15 17 20");
        System.out.println("\nLoop once through all bitstreams: "
                + "ChecksumChecker -l");
        System.out
                .println("\nLoop continuously through all bitstreams: ChecksumChecker -L");
        System.out
                .println("\nCheck a defined number of bitstreams: ChecksumChecker -c 10");
        System.out.println("\nReport all processing (verbose)(default reports only errors): ChecksumChecker -v");
        System.out.println("\nDefault (no arguments) is equivalent to '-c 1'");
        System.exit(0);
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.statistics;
import org.dspace.core.ConfigurationManager;
import java.text.DateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.Iterator;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.io.*;
/**
* This class provides HTML reports for the ReportGenerator class
*
* @author Richard Jones
*/
public class HTMLReport implements Report
{
// FIXME: all of these methods should do some content escaping before
// outputting anything
    /** a list of the statistic blocks being managed by this class,
     *  rendered in insertion order */
    private List<Statistics> blocks = new ArrayList<Statistics>();

    /** the title for the page (defaults to mainTitle when not set explicitly) */
    private String pageTitle = null;

    /** the main title for the page, set via setMainTitle() */
    private String mainTitle = null;

    /** start date for report; null means "from start of records" */
    private Date start = null;

    /** end date for report; null means "to end of records" */
    private Date end = null;

    /** the output file to which to write aggregation data;
     *  defaults to [dspace.dir]/log/report, overridable via setOutput() */
    private String output = ConfigurationManager.getProperty("dspace.dir") +
                            File.separator + "log" + File.separator + "report";
    /**
     * constructor for HTML reporting; state is supplied afterwards via the
     * setter methods and addBlock()
     */
    public HTMLReport()
    {
        // empty constructor
    }
public void setOutput(String newOutput)
{
if (newOutput != null)
{
output = newOutput;
}
}
/**
* return a string containing the report as generated by this class
*
* @return the HTML report
*/
public String render()
{
StringBuffer frag = new StringBuffer();
// get the page headings
frag.append(header(pageTitle));
frag.append(mainTitle());
frag.append(dateRange());
// output the report blocks
// FIXME: perhaps the order of report blocks should be configurable
Iterator<Statistics> statSets = blocks.iterator();
while (statSets.hasNext())
{
frag.append(navigation());
Statistics stats = statSets.next();
frag.append(sectionHeader(stats.getSectionHeader()));
frag.append(topLink());
frag.append(blockExplanation(stats.getExplanation()));
frag.append(floorInfo(stats.getFloor()));
frag.append(statBlock(stats));
}
// output the footer and return
frag.append(footer());
// NB: HTMLReport now takes responsibility to write the output file,
// so that the Report/ReportGenerator can have more general usage
// finally write the string into the output file
try
{
FileOutputStream fos = new FileOutputStream(output);
OutputStreamWriter osr = new OutputStreamWriter(fos, "UTF-8");
PrintWriter out = new PrintWriter(osr);
out.write(frag.toString());
out.close();
}
catch (IOException e)
{
System.out.println("Unable to write to output file " + output);
System.exit(0);
}
return frag.toString();
}
/**
* provide a link back to the top of the page
*
* @return a string containing the link text HTML formatted
*/
public String topLink()
{
return "<div class=\"reportNavigation\"><a href=\"#top\">Top</a></div>";
}
/**
* build the internal navigation for the report
*
* @return an HTML string providing internal page navigation
*/
public String navigation()
{
StringBuffer frag = new StringBuffer();
frag.append("<div class=\"reportNavigation\">");
frag.append("<a href=\"#general_overview\">General Overview</a>");
frag.append(" | ");
frag.append("<a href=\"#archive_information\">Archive Information</a>");
frag.append(" | ");
frag.append("<a href=\"#items_viewed\">Items Viewed</a>");
frag.append(" | ");
frag.append("<a href=\"#all_actions_performed\">All Actions Performed</a>");
frag.append(" | ");
frag.append("<a href=\"#user_logins\">User Logins</a>");
frag.append(" | ");
frag.append("<a href=\"#words_searched\">Words Searched</a>");
frag.append(" | ");
frag.append("<a href=\"#averaging_information\">Averaging Information</a>");
frag.append(" | ");
frag.append("<a href=\"#log_level_information\">Log Level Information</a>");
frag.append(" | ");
frag.append("<a href=\"#processing_information\">Processing Information</a>");
frag.append("</div>");
return frag.toString();
}
/**
* add a statistics block to the report to the class register
*
* @param stat the statistics object to be added to the report
*/
public void addBlock(Statistics stat)
{
blocks.add(stat);
return;
}
/**
* set the starting date for the report
*
* @param start the start date for the report
*/
public void setStartDate(Date start)
{
this.start = (start == null ? null : new Date(start.getTime()));
}
/**
* set the end date for the report
*
* @param end the end date for the report
*/
public void setEndDate(Date end)
{
this.end = (end == null ? null : new Date(end.getTime()));
}
/**
* output the date range in the relevant format. This requires that the
* date ranges have been set using setStartDate() and setEndDate()
*
* @return a string containing date range information
*/
public String dateRange()
{
StringBuffer frag = new StringBuffer();
DateFormat df = DateFormat.getDateInstance();
frag.append("<div class=\"reportDate\">");
if (start != null)
{
frag.append(df.format(start));
}
else
{
frag.append("from start of records ");
}
frag.append(" to ");
if (end != null)
{
frag.append(df.format(end));
}
else
{
frag.append(" end of records");
}
frag.append("</div>\n\n");
return frag.toString();
}
/**
* output the title in the relevant format. This requires that the title
* has been set with setMainTitle()
*
* @return a string containing the title of the report
*/
public String mainTitle()
{
return "<div class=\"reportTitle\"><a name=\"top\">" + mainTitle + "</a></div>\n\n";
}
/**
* set the main title for the report
*
* @param name the name of the service
* @param serverName the name of the server
*/
public void setMainTitle(String name, String serverName)
{
mainTitle = "Statistics for " + name + " on " + serverName;
if (pageTitle == null)
{
pageTitle = mainTitle;
}
return;
}
    /**
     * output any top headers that this page needs
     *
     * @return a string containing the header for the report
     */
    public String header()
    {
        // delegate to the titled variant with an empty title
        return header("");
    }
/**
* output any top headers that this page needs, and include a title
* argument (Title support currently not implemented)
*
* @param title the title of the item being headered
*/
public String header(String title)
{
// FIXME: this need to be figured out to integrate nicely into the
// whole JSTL thing, but for the moment it's just going to deliver
// some styles
StringBuffer frag = new StringBuffer();
frag.append("<style type=\"text/css\">\n");
frag.append("body { font-family: Arial, Helvetica, sans-serif }");
frag.append(".reportTitle { width: 100%; clear: both; text-align: center; font-weight: bold; font-size: 200%; margin: 20px; }\n");
frag.append(".reportSection { width: 100%; clear: both; font-weight: bold; font-size: 160%; margin: 10px; text-align: center; margin-top: 30px; }\n");
frag.append(".reportBlock { border: 1px solid #000000; margin: 10px; }\n");
frag.append(".reportOddRow { background: #dddddd; }\n");
frag.append(".reportEvenRow { background: #bbbbbb; }\n");
frag.append(".reportExplanation { font-style: italic; text-align: center; }\n");
frag.append(".reportDate { font-style: italic; text-align: center; font-size: 120% }\n");
frag.append(".reportFloor { text-align: center; }\n");
frag.append(".rightAlign { text-align: right; }\n");
frag.append(".reportNavigation { text-align: center; }\n");
frag.append("</style>\n");
return frag.toString();
}
/**
 * Output the section header in HTML format. The title is also turned
 * into an HTML anchor name (lower-cased, spaces replaced by
 * underscores) so the section can be linked to directly.
 *
 * @param title the title of the section
 *
 * @return a string containing the section title HTML formatted
 */
public String sectionHeader(String title)
{
    // prepare the title to be an <a name="#title"> style link.
    // A single-character literal replacement does not need a regex
    // compiled on every call; String.replace is equivalent.
    // FIXME: this should be made more generic and used in a number of locations
    String aName = title.toLowerCase().replace(' ', '_');

    return "<div class=\"reportSection\"><a name=\"" + aName + "\">" + title + "</a></div>\n\n";
}
/**
 * Output the report block based on the passed statistics object as an
 * HTML table: one row per {@link Stat}, with the key in the left column
 * and the formatted value (plus optional units) in the right column.
 * Where a statistic carries a reference, that reference is assumed to
 * be a linkable URL and the key is rendered as a link opening in a new
 * window.
 *
 * @param content the statistics object to be displayed
 *
 * @return a string containing the statistics block HTML formatted
 */
public String statBlock(Statistics content)
{
    StringBuffer frag = new StringBuffer();
    Stat[] stats = content.getStats();

    // start the table
    frag.append("<table align=\"center\" class=\"reportBlock\" cellpadding=\"5\">\n");

    // prepare the table headers; a header row is only emitted if at
    // least one of the two column titles has been set
    if (content.getStatName() != null || content.getResultName() != null)
    {
        frag.append("\t<tr>\n");
        frag.append("\t\t<th>\n");
        if (content.getStatName() != null)
        {
            frag.append("\t\t\t" + content.getStatName() + "\n");
        }
        else
        {
            // placeholder cell so the table keeps two columns
            frag.append("\t\t\t \n");
        }
        frag.append("\t\t</th>\n");
        frag.append("\t\t<th>\n");
        if (content.getResultName() != null)
        {
            frag.append("\t\t\t" + content.getResultName() + "\n");
        }
        else
        {
            // placeholder cell so the table keeps two columns
            frag.append("\t\t\t \n");
        }
        frag.append("\t\t</th>\n");
        frag.append("\t</tr>\n");
    }

    // output the statistics in the table, alternating the CSS row
    // class between even rows (index 0, 2, ...) and odd rows
    for (int i = 0; i < stats.length; i++)
    {
        String style = null;
        if ((i & 1) == 1)
        {
            style = "reportOddRow";
        }
        else
        {
            style = "reportEvenRow";
        }

        frag.append("\t<tr class=\"" + style + "\">\n\t\t<td>\n");
        frag.append("\t\t\t");
        // a key with a reference becomes a link opening in a new window
        if (stats[i].getReference() != null)
        {
            frag.append("<a href=\"" + stats[i].getReference() + "\" ");
            frag.append("target=\"_blank\">");
        }
        // clean() tidies the key text for HTML display
        frag.append(this.clean(stats[i].getKey()));
        if (stats[i].getReference() != null)
        {
            frag.append("</a>");
        }
        frag.append("\n");
        frag.append("\t\t</td>\n\t\t<td class=\"rightAlign\">\n");
        // right column: the formatted value plus optional units
        frag.append("\t\t\t").append(ReportTools.numberFormat(stats[i].getValue()));
        if (stats[i].getUnits() != null)
        {
            frag.append(" ").append(stats[i].getUnits());
        }
        frag.append("\n");
        frag.append("\t\t</td>\n\t</tr>\n");
    }

    frag.append("</table>\n");

    return frag.toString();
}
/**
 * Output the floor information in HTML format. Nothing is produced
 * when the floor is zero or negative.
 *
 * @param floor the floor number for the section being displayed
 *
 * @return a string containing floor information HTML formatted
 */
public String floorInfo(int floor)
{
    // no floor in force: nothing to report
    if (floor <= 0)
    {
        return "";
    }

    return "<div class=\"reportFloor\">"
        + "(more than " + ReportTools.numberFormat(floor) + " times)"
        + "</div>\n";
}
/**
 * Output the explanation of the report block in HTML format. Returns
 * an empty string when no explanation is supplied.
 *
 * @param explanation some text explaining the coming report block
 *
 * @return a string containing an explanation HTML formatted
 */
public String blockExplanation(String explanation)
{
    // nothing to explain, nothing to emit
    if (explanation == null)
    {
        return "";
    }

    return "<div class=\"reportExplanation\">" + explanation + "</div>\n\n";
}
/**
 * Output the final footers for this file.
 *
 * @return a string containing the report footer (currently empty — the
 *         HTML report emits no closing boilerplate)
 */
public String footer()
{
    return "";
}
/**
 * Clean Strings for display in HTML by escaping markup-significant
 * characters, so statistics keys render as literal text rather than
 * being interpreted as HTML.
 *
 * @param s The String to clean
 * @return The cleaned String
 */
private String clean(String s)
{
    // Escape angle brackets: the previous code replaced "<" with "<"
    // and ">" with ">", which was a no-op — the HTML entities had
    // evidently been lost.
    s = s.replace("<", "&lt;");
    s = s.replace(">", "&gt;");
    return s;
}
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.statistics;
import java.util.Date;
/**
 * This class represents a single log file line and the operations that can be
 * performed on it
 *
 * The components that it represents are: Date, Level, User, Action, and additional
 * Params
 *
 * @author Richard Jones
 */
public class LogLine
{
    /** the date of the log file line */
    private Date date = null;

    /** the level of the log line type */
    private String level = null;

    /** the user performing the logged action */
    private String user = null;

    /** the action being performed */
    private String action = null;

    /** the parameters associated with the line */
    private String params = null;

    /**
     * constructor to create new log line representation
     *
     * @param date the date of the log line
     * @param level the log level (e.g. INFO, WARN or ERROR)
     * @param user the user performing the logged action
     * @param action the action being performed
     * @param params the parameters associated with the line
     */
    LogLine(Date date, String level, String user, String action, String params)
    {
        // Date is mutable: take a defensive copy so later changes to the
        // caller's object cannot silently alter this log line (getDate()
        // already copied on the way out, but the constructor did not copy
        // on the way in)
        this.date = (date == null ? null : new Date(date.getTime()));
        this.level = level;
        this.user = user;
        this.action = action;
        this.params = params;
    }

    /**
     * get the date of the log line
     *
     * @return a defensive copy of the date of this log line, or null
     */
    public Date getDate()
    {
        return this.date == null ? null : new Date(this.date.getTime());
    }

    /**
     * get the level of this log line
     *
     * @return the level of the log line
     */
    public String getLevel()
    {
        return this.level;
    }

    /**
     * get the user performing the logged action
     *
     * @return the user performing the logged action
     */
    public String getUser()
    {
        return this.user;
    }

    /**
     * get the action being performed
     *
     * @return the logged action
     */
    public String getAction()
    {
        return this.action;
    }

    /**
     * get the parameters associated with the action
     *
     * @return the parameters associated with the action
     */
    public String getParams()
    {
        return this.params;
    }

    /**
     * find out if this log file line is before the given date
     *
     * @param date the date to be compared to
     *
     * @return true if the line is on or before the given date, false if
     *         not (or if either date is null)
     */
    public boolean beforeDate(Date date)
    {
        // guard against null on either side (previously a line with no
        // date threw a NullPointerException here)
        if (date == null || this.date == null)
        {
            return false;
        }
        return date.compareTo(this.date) >= 0;
    }

    /**
     * find out if this log file line is after the given date
     *
     * @param date the date to be compared to
     *
     * @return true if the line is on or after the given date, false if
     *         not (or if either date is null)
     */
    public boolean afterDate(Date date)
    {
        if (date == null || this.date == null)
        {
            return false;
        }
        return date.compareTo(this.date) <= 0;
    }

    /**
     * find out if the log line is of the given level.  Levels are either
     * INFO, WARN or ERROR
     *
     * @param level the level we want to test for
     *
     * @return true if the line is of the specified level, false if not
     */
    public boolean isLevel(String level)
    {
        return this.getLevel().equals(level);
    }

    /**
     * find out if the log line is of the given action.  Actions are not
     * directly constrained by the vocabulary, and any system module may define
     * any action name for its behaviour
     *
     * @param action the action we want to test for
     *
     * @return true if the line is of the specified action, false if not
     */
    public boolean isAction(String action)
    {
        return this.getAction().equals(action);
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.statistics;
import java.util.ArrayList;
import java.util.List;
/**
 * This class provides a wrapper for a related set of statistics.  It contains
 * headers for the Stat key and value pairs for the convenience of displaying
 * them to users as, for example, HTML table headers.  It also holds the
 * list of statistics, and can have them added to itself individually or in
 * arrays
 *
 * @author Richard Jones
 */
public class Statistics
{
    // FIXME: this class could probably do with some tidying

    /** column heading for the statistic keys; useful for display */
    private String statName = null;

    /** column heading for the statistic values; useful for display */
    private String resultName = null;

    /** the individual Stat elements held by this set */
    private List<Stat> statList = new ArrayList<Stat>();

    /** the floor value applied to this set of statistics */
    private int floor = 0;

    /** free-text explanation of this statistics set */
    private String explanation = null;

    /** the main section header for this set of statistics */
    private String sectionHeader = null;

    /**
     * constructor to create a new, empty set of statistics
     */
    Statistics()
    {
        // nothing to initialise
    }

    /**
     * constructor to create a new set of statistics with relevant headers
     *
     * @param statName name of the statistic column
     * @param resultName name of the result column
     */
    Statistics(String statName, String resultName)
    {
        this.statName = statName;
        this.resultName = resultName;
    }

    /**
     * constructor to create a new set of statistics with relevant headers
     * and a floor value
     *
     * @param statName name of the statistic column
     * @param resultName name of the result column
     * @param floor the floor value for this set of statistics
     */
    Statistics(String statName, String resultName, int floor)
    {
        this(statName, resultName);
        this.floor = floor;
    }

    /**
     * add an individual statistic to this object
     *
     * @param stat a statistic for this object
     */
    public void add(Stat stat)
    {
        statList.add(stat);
    }

    /**
     * set the name of the statistic column
     *
     * @param name the name of the statistic column
     */
    public void setStatName(String name)
    {
        statName = name;
    }

    /**
     * set the name of the results column
     *
     * @param name the name of the results column
     */
    public void setResultName(String name)
    {
        resultName = name;
    }

    /**
     * set the explanatory or clarification information for this block of stats
     *
     * @param explanation the explanation for this stat block
     */
    public void setExplanation(String explanation)
    {
        this.explanation = explanation;
    }

    /**
     * get the explanation or clarification information for this block of stats
     *
     * @return the explanation for this stat block
     */
    public String getExplanation()
    {
        return explanation;
    }

    /**
     * set the floor value used in this stat block
     *
     * @param floor the floor value for this stat block
     */
    public void setFloor(int floor)
    {
        this.floor = floor;
    }

    /**
     * get the floor value used in this stat block
     *
     * @return the floor value for this stat block
     */
    public int getFloor()
    {
        return floor;
    }

    /**
     * set the header for this particular stats block
     *
     * @param header for this stats block
     */
    public void setSectionHeader(String header)
    {
        sectionHeader = header;
    }

    /**
     * get the header for this particular stats block
     *
     * @return the header for this stats block
     */
    public String getSectionHeader()
    {
        return sectionHeader;
    }

    /**
     * add an array of statistics to this object
     *
     * @param stats an array of statistics
     */
    public void add(Stat[] stats)
    {
        for (Stat stat : stats)
        {
            statList.add(stat);
        }
    }

    /**
     * get an array of statistics back from this object
     *
     * @return the statistics array
     */
    public Stat[] getStats()
    {
        return statList.toArray(new Stat[statList.size()]);
    }

    /**
     * get the name of the statistic column
     *
     * @return the name of the statistic column
     */
    public String getStatName()
    {
        return statName;
    }

    /**
     * get the name of the result set
     *
     * @return the name of the result set
     */
    public String getResultName()
    {
        return resultName;
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.statistics;
import java.io.File;
import java.io.FileInputStream;
import java.util.Calendar;
import java.util.Date;
import java.util.GregorianCalendar;
import java.util.Properties;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.PosixParser;
import org.dspace.core.Context;
import org.dspace.core.ConfigurationManager;
/**
 * This class allows the running of the DSpace statistic tools
 *
 * Usage: java CreateStatReport -r <statistic to run>
 * Available: <stat-initial> <stat-general> <stat-monthly> <stat-report-initial>
 * <stat-report-general> <stat-report-monthly>
 *
 * @author Chris Yates
 *
 */
public class CreateStatReport {

    /** Current date and time */
    private static Calendar calendar = null;

    /** Reporting start date and time */
    private static Calendar reportStartDate = null;

    /** Path of log directory */
    private static String outputLogDirectory = null;

    /** Path of reporting directory */
    private static String outputReportDirectory = null;

    /** File suffix for aggregation data files */
    private static String outputSuffix = ".dat";

    /** User context */
    private static Context context;

    /** the config file from which to configure the analyser */
    private static String configFile = ConfigurationManager.getProperty("dspace.dir") +
                            File.separator + "config" + File.separator +
                            "dstat.cfg";

    /*
     * Main method to be run from the command line; executes the individual
     * statistic method selected by the -r option.
     *
     * Usage: java CreateStatReport -r <statistic to run>
     */
    public static void main(String[] argv) throws Exception {

        // Open the statistics config file, and make sure the stream is
        // closed once the properties are loaded (it was previously leaked)
        Properties config = new Properties();
        FileInputStream fis = new FileInputStream(new File(configFile));
        try {
            config.load(fis);
        } finally {
            fis.close();
        }

        int startMonth = 0;
        int startYear = 2005;

        try
        {
            // BUGFIX: the fallback defaults for start.year and start.month
            // were swapped ("1" for the year, "2005" for the month); a year
            // sensibly defaults to 2005, a month to 1 (January)
            startYear = Integer.parseInt(config.getProperty("start.year", "2005").trim());
        } catch (NumberFormatException nfe)
        {
            System.err.println("start.year is incorrectly set in dstat.cfg. Must be a number (e.g. 2005).");
            System.exit(0);
        }
        try
        {
            startMonth = Integer.parseInt(config.getProperty("start.month", "1").trim());
        } catch (NumberFormatException nfe)
        {
            System.err.println("start.month is incorrectly set in dstat.cfg. Must be a number between 1 and 12.");
            System.exit(0);
        }

        // GregorianCalendar months are zero-based, hence startMonth - 1
        reportStartDate = new GregorianCalendar(startYear, startMonth - 1, 1);
        calendar = new GregorianCalendar();

        // create context as super user
        context = new Context();
        context.setIgnoreAuthorization(true);

        //get paths to directories
        outputLogDirectory = ConfigurationManager.getProperty("log.dir") + File.separator;
        outputReportDirectory = ConfigurationManager.getProperty("report.dir") + File.separator;

        //read in command line variable to determine which statistic to run
        CommandLineParser parser = new PosixParser();
        Options options = new Options();
        options.addOption("r", "report", true, "report");
        CommandLine line = parser.parse(options, argv);

        String statAction = null;

        if (line.hasOption('r'))
        {
            statAction = line.getOptionValue('r');
        }

        if (statAction == null) {
            usage();
            System.exit(0);
        }

        //call the appropriate statistics method; at most one can match,
        //so dispatch with else-if and report usage for unknown names
        if (statAction.equals("stat-monthly")) {
            statMonthly();
        } else if (statAction.equals("stat-general")) {
            statGeneral();
        } else if (statAction.equals("stat-initial")) {
            statInitial();
        } else if (statAction.equals("stat-report-general")) {
            statReportGeneral();
        } else if (statAction.equals("stat-report-initial")) {
            statReportInitial();
        } else if (statAction.equals("stat-report-monthly")) {
            statReportMonthly();
        } else {
            // unrecognised report name: tell the user what is available
            usage();
        }
    }

    /**
     * This method generates an aggregation file from the first of the current
     * month to the end of the current month.
     *
     * @throws Exception
     */
    private static void statMonthly() throws Exception {

        //Output Prefix
        String outputPrefix = "dspace-log-monthly-";

        // set up our command line variables
        String myLogDir = null;
        String myFileTemplate = null;
        String myConfigFile = null;
        StringBuffer myOutFile = null;
        Date myStartDate = null;
        Date myEndDate = null;
        boolean myLookUp = false;

        // analysis window: first to last day of the current month
        Calendar start = new GregorianCalendar(calendar.get(Calendar.YEAR),
                                               calendar.get(Calendar.MONTH),
                                               calendar.getActualMinimum(Calendar.DAY_OF_MONTH));
        myStartDate = start.getTime();

        Calendar end = new GregorianCalendar(calendar.get(Calendar.YEAR),
                                             calendar.get(Calendar.MONTH),
                                             calendar.getActualMaximum(Calendar.DAY_OF_MONTH));
        myEndDate = end.getTime();

        // output file: <log.dir>/dspace-log-monthly-YYYY-M.dat
        myOutFile = new StringBuffer(outputLogDirectory);
        myOutFile.append(outputPrefix);
        myOutFile.append(calendar.get(Calendar.YEAR));
        myOutFile.append("-");
        myOutFile.append(calendar.get(Calendar.MONTH) + 1);
        myOutFile.append(outputSuffix);

        LogAnalyser.processLogs(context, myLogDir, myFileTemplate, myConfigFile, myOutFile.toString(), myStartDate, myEndDate, myLookUp);
    }

    /**
     * This method generates a full aggregation file based on the full log period
     *
     * @throws Exception
     */
    private static void statGeneral() throws Exception {

        //Output Prefix
        String outputPrefix = "dspace-log-general-";

        // set up our command line variables; null dates mean the full period
        String myLogDir = null;
        String myFileTemplate = null;
        String myConfigFile = null;
        StringBuffer myOutFile = null;
        Date myStartDate = null;
        Date myEndDate = null;
        boolean myLookUp = false;

        // output file: <log.dir>/dspace-log-general-YYYY-M-D.dat
        myOutFile = new StringBuffer(outputLogDirectory);
        myOutFile.append(outputPrefix);
        myOutFile.append(calendar.get(Calendar.YEAR));
        myOutFile.append("-");
        myOutFile.append(calendar.get(Calendar.MONTH) + 1);
        myOutFile.append("-");
        myOutFile.append(calendar.get(Calendar.DAY_OF_MONTH));
        myOutFile.append(outputSuffix);

        LogAnalyser.processLogs(context, myLogDir, myFileTemplate, myConfigFile, myOutFile.toString(), myStartDate, myEndDate, myLookUp);
    }

    /**
     * This method starts from the configured report start date and loops
     * month by month until the current month, generating a monthly
     * aggregation file for the DStat system for each one.
     *
     * @throws Exception
     */
    private static void statInitial() throws Exception {

        //Output Prefix
        String outputPrefix = "dspace-log-monthly-";

        // set up our command line variables
        String myLogDir = null;
        String myFileTemplate = null;
        String myConfigFile = null;
        StringBuffer myOutFile = null;
        Date myStartDate = null;
        Date myEndDate = null;
        boolean myLookUp = false;

        Calendar reportEndDate = new GregorianCalendar(calendar.get(Calendar.YEAR),
                                                       calendar.get(Calendar.MONTH),
                                                       calendar.getActualMaximum(Calendar.DAY_OF_MONTH));

        // walk month by month from the configured start up to the present
        Calendar currentMonth = (Calendar) reportStartDate.clone();
        while (currentMonth.before(reportEndDate)) {

            Calendar start = new GregorianCalendar(currentMonth.get(Calendar.YEAR),
                                                   currentMonth.get(Calendar.MONTH),
                                                   currentMonth.getActualMinimum(Calendar.DAY_OF_MONTH));
            myStartDate = start.getTime();

            Calendar end = new GregorianCalendar(currentMonth.get(Calendar.YEAR),
                                                 currentMonth.get(Calendar.MONTH),
                                                 currentMonth.getActualMaximum(Calendar.DAY_OF_MONTH));
            myEndDate = end.getTime();

            myOutFile = new StringBuffer(outputLogDirectory);
            myOutFile.append(outputPrefix);
            myOutFile.append(currentMonth.get(Calendar.YEAR));
            myOutFile.append("-");
            myOutFile.append(currentMonth.get(Calendar.MONTH) + 1);
            myOutFile.append(outputSuffix);

            LogAnalyser.processLogs(context, myLogDir, myFileTemplate, myConfigFile, myOutFile.toString(), myStartDate, myEndDate, myLookUp);

            currentMonth.add(Calendar.MONTH, 1);
        }
    }

    /**
     * This method generates a full report based on the general aggregation
     * file for the full log period
     *
     * @throws Exception
     */
    private static void statReportGeneral() throws Exception {

        //Prefix
        String inputPrefix = "dspace-log-general-";
        String outputPrefix = "report-general-";

        String myFormat = "html";
        StringBuffer myInput = null;
        StringBuffer myOutput = null;
        String myMap = null;

        // input: the aggregation file written by statGeneral() today
        myInput = new StringBuffer(outputLogDirectory);
        myInput.append(inputPrefix);
        myInput.append(calendar.get(Calendar.YEAR));
        myInput.append("-");
        myInput.append(calendar.get(Calendar.MONTH) + 1);
        myInput.append("-");
        myInput.append(calendar.get(Calendar.DAY_OF_MONTH));
        myInput.append(outputSuffix);

        // output: <report.dir>/report-general-YYYY-M-D.html
        myOutput = new StringBuffer(outputReportDirectory);
        myOutput.append(outputPrefix);
        myOutput.append(calendar.get(Calendar.YEAR));
        myOutput.append("-");
        myOutput.append(calendar.get(Calendar.MONTH) + 1);
        myOutput.append("-");
        myOutput.append(calendar.get(Calendar.DAY_OF_MONTH));
        myOutput.append(".");
        myOutput.append(myFormat);

        ReportGenerator.processReport(context, myFormat, myInput.toString(), myOutput.toString(), myMap);
    }

    /**
     * This method starts from the configured report start date and loops
     * month by month until the current month, generating monthly reports
     * from the DStat aggregation files
     *
     * @throws Exception
     */
    private static void statReportInitial() throws Exception {

        //Prefix
        String inputPrefix = "dspace-log-monthly-";
        String outputPrefix = "report-";

        String myFormat = "html";
        StringBuffer myInput = null;
        StringBuffer myOutput = null;
        String myMap = null;

        Calendar reportEndDate = new GregorianCalendar(calendar.get(Calendar.YEAR),
                                                       calendar.get(Calendar.MONTH),
                                                       calendar.getActualMaximum(Calendar.DAY_OF_MONTH));

        // walk month by month from the configured start up to the present
        Calendar currentMonth = (Calendar) reportStartDate.clone();
        while (currentMonth.before(reportEndDate)) {

            myInput = new StringBuffer(outputLogDirectory);
            myInput.append(inputPrefix);
            myInput.append(currentMonth.get(Calendar.YEAR));
            myInput.append("-");
            myInput.append(currentMonth.get(Calendar.MONTH) + 1);
            myInput.append(outputSuffix);

            myOutput = new StringBuffer(outputReportDirectory);
            myOutput.append(outputPrefix);
            myOutput.append(currentMonth.get(Calendar.YEAR));
            myOutput.append("-");
            myOutput.append(currentMonth.get(Calendar.MONTH) + 1);
            myOutput.append(".");
            myOutput.append(myFormat);

            ReportGenerator.processReport(context, myFormat, myInput.toString(), myOutput.toString(), myMap);

            currentMonth.add(Calendar.MONTH, 1);
        }
    }

    /**
     * This method generates a report from the aggregation files which have
     * been run for the most recent month
     *
     * @throws Exception
     */
    private static void statReportMonthly() throws Exception
    {
        //Prefix
        String inputPrefix = "dspace-log-monthly-";
        String outputPrefix = "report-";

        String myFormat = "html";
        StringBuffer myInput = null;
        StringBuffer myOutput = null;
        String myMap = null;

        // input: the aggregation file for the current month
        myInput = new StringBuffer(outputLogDirectory);
        myInput.append(inputPrefix);
        myInput.append(calendar.get(Calendar.YEAR));
        myInput.append("-");
        myInput.append(calendar.get(Calendar.MONTH) + 1);
        myInput.append(outputSuffix);

        // output: <report.dir>/report-YYYY-M.html
        myOutput = new StringBuffer(outputReportDirectory);
        myOutput.append(outputPrefix);
        myOutput.append(calendar.get(Calendar.YEAR));
        myOutput.append("-");
        myOutput.append(calendar.get(Calendar.MONTH) + 1);
        myOutput.append(".");
        myOutput.append(myFormat);

        ReportGenerator.processReport(context, myFormat, myInput.toString(), myOutput.toString(), myMap);
    }

    /*
     * Output the usage information
     */
    private static void usage() throws Exception {
        System.out.println("Usage: java CreateStatReport -r <statistic to run>");
        System.out.println("Available: <stat-initial> <stat-general> <stat-monthly> <stat-report-initial> <stat-report-general> <stat-report-monthly>");
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.statistics;
/**
* This is a primitive class to represent a single statistic, which will
* generally be a key value pair but with the capabilities for being sorted
*
* Note: this class has a natural ordering that is inconsistent with equals
*
* @author Richard Jones
*/
public class Stat implements Comparable
{
// FIXME: this class is functional but a bit messy, and should be neatened
// up and completed
/** the key, which is effectively the text of the statistic */
private String key = null;
/** the value assigned to the key, generally a count of the key */
private int value = 0;
/** a reference to an external resource which relates to this statistic */
private String reference = null;
/** the units that this statistic is in */
private String units = null;
/**
* constructor to create new statistic
*
* @param key the key for the statistic
* @param value the value for the statistic
*/
Stat(String key, int value)
{
this.key = key;
this.value = value;
}
/**
* constructor to create new statistic
*
* @param key the key for the statistic
* @param value the value for the statistic
* @param reference the value for the external reference
*/
Stat(String key, int value, String reference)
{
this.key = key;
this.value = value;
this.reference = reference;
}
/**
* set the units of this statistic
*
* @param unit the units that this statistic is measured in
*/
public void setUnits(String unit)
{
this.units = unit;
}
/**
* get the unts that this statistic is measured in
*
* @return the units this statistic is measured in
*/
public String getUnits()
{
return this.units;
}
/**
* get the value of the statistic
*
* @return the value of this statistic
*/
public int getValue()
{
return this.value;
}
/**
* get the key (text describing) the statistic
*
* @return the key for this statistic
*/
public String getKey()
{
return this.key;
}
/**
* get the reference to related statistic information
*
* @return the reference for this statistic
*/
public String getReference()
{
return this.reference;
}
/**
* set the reference information
*
* @param key the key for this statistic
*/
public void setKey(String key)
{
this.key = key;
}
/**
* set the reference information
*
* @param reference the reference for this statistic
*/
public void setReference(String reference)
{
this.reference = reference;
}
/**
* compare the current object to the given object returning -1 if o is less
* than the current object, 0 if they are the same, and +1 if o is greater
* than the current object.
*
* @param o the object to compare to the current one
*
* @return +1, 0, -1 if o is less than, equal to, or greater than the
* current object value.
*/
public int compareTo(Object o)
{
int objectValue = ((Stat) o).getValue();
if (objectValue < this.getValue())
{
return -1;
}
else if (objectValue == this.getValue())
{
return 0;
}
else if (objectValue > this.getValue())
{
return 1;
}
return 0;
}
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.statistics;
import org.dspace.core.ConfigurationManager;
import org.dspace.core.Context;
import org.dspace.core.LogManager;
import org.dspace.storage.rdbms.DatabaseManager;
import org.dspace.storage.rdbms.TableRow;
import java.sql.SQLException;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Date;
import java.util.GregorianCalendar;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.StringTokenizer;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
/**
* This class performs all the actual analysis of a given set of DSpace log
* files. Most input can be configured; use the -help flag for a full list
* of usage information.
*
* The output of this file is plain text and forms an "aggregation" file which
* can then be used for display purposes using the related ReportGenerator
* class.
*
* @author Richard Jones
*/
public class LogAnalyser
{
// set up our class globals
// FIXME: there are so many of these perhaps they should exist in a static
// object of their own
/////////////////
// aggregators
/////////////////
/** aggregator for all actions performed in the system */
private static Map<String, Integer> actionAggregator;
/** aggregator for all searches performed */
private static Map<String, Integer> searchAggregator;
/** aggregator for user logins */
private static Map<String, Integer> userAggregator;
/** aggregator for item views */
private static Map<String, Integer> itemAggregator;
/** aggregator for current archive state statistics */
private static Map<String, Integer> archiveStats;
/** warning counter */
private static int warnCount = 0;
/** log line counter */
private static int lineCount = 0;
//////////////////
// config data
//////////////////
/** list of actions to be included in the general summary */
private static List<String> generalSummary;
/** list of words not to be aggregated */
private static List<String> excludeWords;
/** list of search types to be ignored, such as "author:" */
private static List<String> excludeTypes;
/** list of characters to be excluded */
private static List<String> excludeChars;
/** list of item types to be reported on in the current state */
private static List<String> itemTypes;
/** bottom limit to output for search word analysis */
private static int searchFloor;
/** bottom limit to output for item view analysis */
private static int itemFloor;
/** number of items from most popular to be looked up in the database */
private static int itemLookup;
/** mode to use for user email display */
private static String userEmail;
/** URL of the service being analysed */
private static String url;
/** Name of the service being analysed */
private static String name;
/** Name of the service being analysed */
private static String hostName;
/** the average number of views per item */
private static int views = 0;
///////////////////////
// regular expressions
///////////////////////
/** Exclude characters regular expression pattern */
private static Pattern excludeCharRX = null;
/** handle indicator string regular expression pattern */
private static Pattern handleRX = null;
/** item id indicator string regular expression pattern */
private static Pattern itemRX = null;
/** query string indicator regular expression pattern */
private static Pattern queryRX = null;
/** collection indicator regular expression pattern */
private static Pattern collectionRX = null;
/** community indicator regular expression pattern */
private static Pattern communityRX = null;
/** results indicator regular expression pattern */
private static Pattern resultsRX = null;
/** single character regular expression pattern */
private static Pattern singleRX = null;
/** a pattern to match a valid version 1.3 log file line */
private static Pattern valid13 = null;
/** a pattern to match a valid version 1.4 log file line */
private static Pattern valid14 = null;
/** pattern to match valid log file names */
private static Pattern logRegex = null;
/** pattern to match commented out lines from the config file */
private static Pattern comment = Pattern.compile("^#");
/** pattern to match genuine lines from the config file */
private static Pattern real = Pattern.compile("^(.+)=(.+)");
/** pattern to match all search types */
private static Pattern typeRX = null;
/** pattern to match all search types */
private static Pattern wordRX = null;
//////////////////////////
// Miscellaneous variables
//////////////////////////
/** process timing clock */
private static Calendar startTime = null;
/////////////////////////
// command line options
////////////////////////
/** the log directory to be analysed */
private static String logDir = ConfigurationManager.getProperty("log.dir");
/** the regex to describe the file name format */
private static String fileTemplate = "dspace\\.log.*";
/** the config file from which to configure the analyser */
private static String configFile = ConfigurationManager.getProperty("dspace.dir") +
File.separator + "config" + File.separator +
"dstat.cfg";
/** the output file to which to write aggregation data */
private static String outFile = ConfigurationManager.getProperty("log.dir") + File.separator + "dstat.dat";
/** the starting date of the report */
private static Date startDate = null;
/** the end date of the report */
private static Date endDate = null;
/** the starting date of the report as obtained from the log files */
private static Date logStartDate = null;
/** the end date of the report as obtained from the log files */
private static Date logEndDate = null;
/**
 * main method to be run from command line.  See usage information for
 * details as to how to use the command line flags (-help)
 *
 * Recognised flags: -log, -file, -cfg, -out, -start, -end (all taking a
 * value), plus -lookup and -help.
 */
public static void main(String [] argv)
    throws Exception, SQLException
{
    // first, start the processing clock
    startTime = new GregorianCalendar();

    // create context as super user
    Context context = new Context();
    context.setIgnoreAuthorization(true);

    // set up our command line variables
    String myLogDir = null;
    String myFileTemplate = null;
    String myConfigFile = null;
    String myOutFile = null;
    Date myStartDate = null;
    Date myEndDate = null;
    boolean myLookUp = false;

    // read in our command line options
    for (int i = 0; i < argv.length; i++)
    {
        if (argv[i].equals("-help"))
        {
            LogAnalyser.usage();
            System.exit(0);
        }

        if (argv[i].equals("-lookup"))
        {
            myLookUp = true;
        }

        // the remaining flags all take a value; guard against a trailing
        // flag with no argument (previously argv[i+1] threw
        // ArrayIndexOutOfBoundsException)
        if (i + 1 >= argv.length)
        {
            continue;
        }

        if (argv[i].equals("-log"))
        {
            myLogDir = argv[i + 1];
        }

        if (argv[i].equals("-file"))
        {
            myFileTemplate = argv[i + 1];
        }

        if (argv[i].equals("-cfg"))
        {
            myConfigFile = argv[i + 1];
        }

        if (argv[i].equals("-out"))
        {
            myOutFile = argv[i + 1];
        }

        if (argv[i].equals("-start"))
        {
            myStartDate = parseDate(argv[i + 1]);
        }

        if (argv[i].equals("-end"))
        {
            myEndDate = parseDate(argv[i + 1]);
        }
    }

    // now call the method which actually processes the logs
    processLogs(context, myLogDir, myFileTemplate, myConfigFile, myOutFile, myStartDate, myEndDate, myLookUp);
}
/**
 * using the pre-configuration information passed here, analyse the logs
 * and produce the aggregation file
 *
 * @param context the DSpace context object this occurs under
 * @param myLogDir the passed log directory.  Uses default if null
 * @param myFileTemplate the passed file name regex.  Uses default if null
 * @param myConfigFile the DStat config file.  Uses default if null
 * @param myOutFile the file to which to output aggregation data.  Uses default if null
 * @param myStartDate the desired start of the analysis.  Starts from the beginning otherwise
 * @param myEndDate the desired end of the analysis.  Goes to the end otherwise
 * @param myLookUp force a lookup of the database
 *
 * @throws IOException if a log file cannot be read after being opened
 * @throws SQLException if the archive item counts cannot be obtained
 */
public static void processLogs(Context context, String myLogDir,
                               String myFileTemplate, String myConfigFile,
                               String myOutFile, Date myStartDate,
                               Date myEndDate, boolean myLookUp)
    throws IOException, SQLException
{
    // FIXME: perhaps we should have all parameters and aggregators put
    // together in a single aggregating object

    // restart the processing clock so direct callers are timed too
    startTime = new GregorianCalendar();

    // (re)instantiate aggregators so repeated invocations start clean
    actionAggregator = new HashMap<String, Integer>();
    searchAggregator = new HashMap<String, Integer>();
    userAggregator = new HashMap<String, Integer>();
    itemAggregator = new HashMap<String, Integer>();
    archiveStats = new HashMap<String, Integer>();

    // (re)instantiate lists
    generalSummary = new ArrayList<String>();
    excludeWords = new ArrayList<String>();
    excludeTypes = new ArrayList<String>();
    excludeChars = new ArrayList<String>();
    itemTypes = new ArrayList<String>();

    // set the parameters for this analysis
    setParameters(myLogDir, myFileTemplate, myConfigFile, myOutFile, myStartDate, myEndDate, myLookUp);

    // read in the config information, exiting if we fail to open
    // the given config file
    readConfig(configFile);

    // assemble the regular expressions for later use (requires the file
    // template to build the regex to match it)
    setRegex(fileTemplate);

    // get the log files
    File[] logFiles = getLogFiles(logDir);

    // for every log file do analysis
    // FIXME: it is easy to implement not processing log files after the
    // dates exceed the end boundary, but is there an easy way to do it
    // for the start of the file?  Note that we can assume that the contents
    // of the log file are sequential, but can we assume the files are
    // provided in a date sequence?
    for (int i = 0; i < logFiles.length; i++)
    {
        // check to see if this file is a log file against the global regex
        Matcher matchRegex = logRegex.matcher(logFiles[i].getName());
        if (!matchRegex.matches())
        {
            continue;
        }

        // it is a log file: open it up and have a look at the contents
        BufferedReader br = null;
        try
        {
            br = new BufferedReader(new FileReader(logFiles[i].toString()));
        }
        catch (IOException e)
        {
            System.out.println("Failed to read log file " + logFiles[i].toString());
            System.exit(0);
        }

        // ensure the reader is closed even if the analysis below throws
        // (previously the reader leaked on any mid-file IOException)
        try
        {
            // for each line in the file do the analysis
            // FIXME: perhaps each section needs to be dolled out to an
            // analysing class to allow pluggability of other methods of
            // analysis, and ease of code reading too - Pending further thought
            String line = null;
            while ((line = br.readLine()) != null)
            {
                // get the log line object; null means the line did not
                // match either supported log format
                LogLine logLine = getLogLine(line);
                if (logLine == null)
                {
                    continue;
                }

                // first find out if we are constraining by date and
                // if so apply the restrictions
                if ((startDate != null) && (!logLine.afterDate(startDate)))
                {
                    continue;
                }

                // log lines are chronological, so once past the end date
                // the rest of this file can be skipped
                if ((endDate != null) && (!logLine.beforeDate(endDate)))
                {
                    break;
                }

                // count the number of lines parsed
                lineCount++;

                // if we are not constrained by date, register the date
                // as the start/end date if it is the earliest/latest so far
                // FIXME: this should probably have a method of its own
                if (startDate == null)
                {
                    if (logStartDate != null)
                    {
                        if (logLine.beforeDate(logStartDate))
                        {
                            logStartDate = logLine.getDate();
                        }
                    }
                    else
                    {
                        logStartDate = logLine.getDate();
                    }
                }

                if (endDate == null)
                {
                    if (logEndDate != null)
                    {
                        if (logLine.afterDate(logEndDate))
                        {
                            logEndDate = logLine.getDate();
                        }
                    }
                    else
                    {
                        logEndDate = logLine.getDate();
                    }
                }

                // count the warnings
                if (logLine.isLevel("WARN"))
                {
                    // FIXME: really, this ought to be some kind of level
                    // aggregator
                    warnCount++;
                }

                // is the action a search?
                if (logLine.isAction("search"))
                {
                    // get back all the valid search words from the query
                    String[] words = analyseQuery(logLine.getParams());

                    // for each search word add to the aggregator or
                    // increment the aggregator's counter
                    for (int j = 0; j < words.length; j++)
                    {
                        // FIXME: perhaps aggregators ought to be objects
                        // themselves
                        searchAggregator.put(words[j], increment(searchAggregator, words[j]));
                    }
                }

                // is the action a login, and are we counting user logins?
                if (logLine.isAction("login") && !userEmail.equals("off"))
                {
                    userAggregator.put(logLine.getUser(), increment(userAggregator, logLine.getUser()));
                }

                // is the action an item view?
                if (logLine.isAction("view_item"))
                {
                    String handle = logLine.getParams();

                    // strip the "handle=" prefix and any trailing item id
                    Matcher matchHandle = handleRX.matcher(handle);
                    handle = matchHandle.replaceAll("");
                    Matcher matchItem = itemRX.matcher(handle);
                    handle = matchItem.replaceAll("").trim();

                    // either add the handle to the aggregator or
                    // increment its counter
                    itemAggregator.put(handle, increment(itemAggregator, handle));
                }

                // log all the activity
                actionAggregator.put(logLine.getAction(), increment(actionAggregator, logLine.getAction()));
            }
        }
        finally
        {
            br.close();
        }
    }

    // query the database for the current archive state
    // FIXME: this is a kind of separate section.  Would it be worth building
    // the summary string separately and then inserting it into the real
    // summary later?  Especially if we make the archive analysis more complex
    archiveStats.put("All Items", getNumItems(context));
    for (int i = 0; i < itemTypes.size(); i++)
    {
        archiveStats.put(itemTypes.get(i), getNumItems(context, itemTypes.get(i)));
    }

    // now do the host name and url lookup
    // NOTE(review): these configuration properties are assumed to be set;
    // a missing property would NPE on trim() - confirm deployment config
    hostName = ConfigurationManager.getProperty("dspace.hostname").trim();
    name = ConfigurationManager.getProperty("dspace.name").trim();
    url = ConfigurationManager.getProperty("dspace.url").trim();
    if ((url != null) && (!url.endsWith("/")))
    {
        url = url + "/";
    }

    // do the average views analysis
    if ((archiveStats.get("All Items")).intValue() != 0)
    {
        // FIXME: this is dependent on there being a query on the db, which
        // there might not always be if it becomes configurable
        Double avg = Math.ceil(
                        (actionAggregator.get("view_item")).doubleValue() /
                        (archiveStats.get("All Items")).doubleValue());
        views = avg.intValue();
    }

    // finally, write the output
    createOutput();
}
/**
 * Overlay any non-null caller-supplied values onto the class-level
 * configuration.  This lives in its own method because the API permits both
 * running from the command line with args and calling processLogs
 * statically from elsewhere.
 *
 * @param myLogDir the log file directory to be analysed
 * @param myFileTemplate regex for log file names
 * @param myConfigFile config file to use for dstat
 * @param myOutFile file to write the aggregation into
 * @param myStartDate requested log reporting start date
 * @param myEndDate requested log reporting end date
 * @param myLookUp requested look up force flag
 */
public static void setParameters(String myLogDir, String myFileTemplate,
                                 String myConfigFile, String myOutFile,
                                 Date myStartDate, Date myEndDate,
                                 boolean myLookUp)
{
    // a null argument means "keep the existing default"
    if (myLogDir != null)
    {
        logDir = myLogDir;
    }

    if (myFileTemplate != null)
    {
        fileTemplate = myFileTemplate;
    }

    if (myConfigFile != null)
    {
        configFile = myConfigFile;
    }

    if (myOutFile != null)
    {
        outFile = myOutFile;
    }

    // take defensive copies of the dates so later mutation by the caller
    // cannot affect the analysis
    if (myStartDate != null)
    {
        startDate = new Date(myStartDate.getTime());
    }

    if (myEndDate != null)
    {
        endDate = new Date(myEndDate.getTime());
    }
}
/**
 * Generate the analyser's output and write it to the configured out file.
 * The output is a flat "key=value" aggregation format consumed by the
 * report generator.  Exits the JVM if the output file cannot be written,
 * mirroring the error handling style of the rest of this class.
 */
public static void createOutput()
{
    // buffer to hold the final output; StringBuilder is fine since this is
    // purely method-local
    StringBuilder summary = new StringBuilder();

    // iterator that will be reused to walk each aggregator's keys
    Iterator<String> keys = null;

    // output the number of lines parsed and warnings encountered
    summary.append("log_lines=" + Integer.toString(lineCount) + "\n");
    summary.append("warnings=" + Integer.toString(warnCount) + "\n");

    // set the general summary config up in the aggregator file
    for (int i = 0; i < generalSummary.size(); i++)
    {
        summary.append("general_summary=" + generalSummary.get(i) + "\n");
    }

    // output the host name and the service name
    summary.append("server_name=" + hostName + "\n");
    summary.append("service_name=" + name + "\n");

    // output the date information: explicitly requested dates take
    // precedence, otherwise fall back to the dates observed in the logs
    SimpleDateFormat sdf = new SimpleDateFormat("dd'/'MM'/'yyyy");

    if (startDate != null)
    {
        summary.append("start_date=" + sdf.format(startDate) + "\n");
    }
    else if (logStartDate != null)
    {
        summary.append("start_date=" + sdf.format(logStartDate) + "\n");
    }

    if (endDate != null)
    {
        summary.append("end_date=" + sdf.format(endDate) + "\n");
    }
    else if (logEndDate != null)
    {
        summary.append("end_date=" + sdf.format(logEndDate) + "\n");
    }

    // write out the archive stats
    keys = archiveStats.keySet().iterator();
    while (keys.hasNext())
    {
        String key = keys.next();
        summary.append("archive." + key + "=" + archiveStats.get(key) + "\n");
    }

    // write out the action aggregation results
    keys = actionAggregator.keySet().iterator();
    while (keys.hasNext())
    {
        String key = keys.next();
        summary.append("action." + key + "=" + actionAggregator.get(key) + "\n");
    }

    // depending on the config settings for reporting on emails output the
    // login information
    summary.append("user_email=" + userEmail + "\n");
    int address = 1;
    keys = userAggregator.keySet().iterator();

    // for each email address either write out the address and the count
    // or alias it with an "Address X" label, to keep the data confidential
    // FIXME: the users reporting should also have a floor value
    while (keys.hasNext())
    {
        String key = keys.next();
        if (userEmail.equals("on"))
        {
            summary.append("user." + key + "=" + userAggregator.get(key) + "\n");
        }
        else if (userEmail.equals("alias"))
        {
            // the "user." prefix is only emitted when a record actually
            // follows (the previous code appended it unconditionally,
            // leaving a dangling "user." for unrecognised userEmail modes)
            summary.append("user.Address " + Integer.toString(address++) + "=" + userAggregator.get(key) + "\n");
        }
    }

    // FIXME: all values which have floors set should provide an "other"
    // record which counts how many other things which didn't make it into
    // the listing there are

    // output the search word information, applying the configured floor
    summary.append("search_floor=" + searchFloor + "\n");
    keys = searchAggregator.keySet().iterator();
    while (keys.hasNext())
    {
        String key = keys.next();
        if ((searchAggregator.get(key)).intValue() >= searchFloor)
        {
            summary.append("search." + key + "=" + searchAggregator.get(key) + "\n");
        }
    }

    // FIXME: we should do a lot more with the search aggregator
    // Possible feature list:
    //  - constrain by collection/community perhaps?
    //  - we should consider building our own aggregator class which can
    //    be full of rich data.  Perhaps this and the Stats class should
    //    be the same thing.

    // item viewing information
    summary.append("item_floor=" + itemFloor + "\n");
    summary.append("host_url=" + url + "\n");
    summary.append("item_lookup=" + itemLookup + "\n");

    // write out the item access information, applying the configured floor
    keys = itemAggregator.keySet().iterator();
    while (keys.hasNext())
    {
        String key = keys.next();
        if ((itemAggregator.get(key)).intValue() >= itemFloor)
        {
            summary.append("item." + key + "=" + itemAggregator.get(key) + "\n");
        }
    }

    // output the average views per item
    if (views > 0)
    {
        summary.append("avg_item_views=" + views + "\n");
    }

    // insert the analysis processing time information
    Calendar endTime = new GregorianCalendar();
    long timeInMillis = (endTime.getTimeInMillis() - startTime.getTimeInMillis());
    summary.append("analysis_process_time=" + Long.toString(timeInMillis / 1000) + "\n");

    // finally write the string into the output file, making sure the
    // writer is closed even if the write fails part way through
    // (previously the writer leaked on a mid-write IOException)
    BufferedWriter out = null;
    try
    {
        out = new BufferedWriter(new FileWriter(outFile));
        out.write(summary.toString());
        out.flush();
    }
    catch (IOException e)
    {
        System.out.println("Unable to write to output file " + outFile);
        System.exit(0);
    }
    finally
    {
        if (out != null)
        {
            try
            {
                out.close();
            }
            catch (IOException e)
            {
                // best effort close; content was already flushed above
            }
        }
    }
}
/**
 * get an array of file objects representing the passed log directory
 *
 * @param logDir the log directory in which to pick up files
 *
 * @return an array of file objects representing the given logDir; never
 *         null (the JVM is exited if the directory cannot be read)
 */
public static File[] getLogFiles(String logDir)
{
    // open the log files directory, read in the files, check that they
    // match the passed regular expression then analyse the content
    File logs = new File(logDir);

    // if log dir is not a directory throw an error and exit
    if (!logs.isDirectory())
    {
        System.out.println("Passed log directory is not a directory");
        System.exit(0);
    }

    // get the files in the directory; listFiles() returns null (not an
    // empty array) if an I/O error occurs, which previously caused an
    // NPE in the caller's loop
    File[] files = logs.listFiles();
    if (files == null)
    {
        System.out.println("Unable to read log directory " + logDir);
        System.exit(0);
    }

    return files;
}
/**
 * set up the regular expressions to be used by this analyser.  Mostly this
 * exists to provide a degree of segregation and readability to the code
 * and to ensure that you only need to set up the regular expressions to
 * be used once.  Must be called after readConfig(), since the exclusion
 * lists read from the config feed the patterns built here.
 *
 * @param fileTemplate the regex to be used to identify dspace log files
 */
public static void setRegex(String fileTemplate)
{
    // build the exclude-characters character class, escaping each entry
    StringBuffer charRegEx = new StringBuffer();
    charRegEx.append("[");
    for (int i = 0; i < excludeChars.size(); i++)
    {
        charRegEx.append("\\").append(excludeChars.get(i));
    }
    charRegEx.append("]");
    excludeCharRX = Pattern.compile(charRegEx.toString());

    // regular expression to find handle indicators in strings
    handleRX = Pattern.compile("handle=");

    // regular expression to find item_id indicators in strings
    itemRX = Pattern.compile(",item_id=.*$");

    // regular expression to find query indicators in strings
    queryRX = Pattern.compile("query=");

    // regular expression to find collections in strings
    collectionRX = Pattern.compile("collection_id=[0-9]*,");

    // regular expression to find communities in strings
    communityRX = Pattern.compile("community_id=[0-9]*,");

    // regular expression to find search result sets
    resultsRX = Pattern.compile(",results=(.*)");

    // regular expression to find single characters anywhere in the string
    singleRX = Pattern.compile("( . |^. | .$)");

    // set up the standard log file line regular expressions: the 1.4
    // format carries one more colon-separated field than the 1.3 format
    String logLine13 = "^(\\d\\d\\d\\d-\\d\\d\\-\\d\\d) \\d\\d:\\d\\d:\\d\\d,\\d\\d\\d (\\w+)\\s+\\S+ @ ([^:]+):[^:]+:([^:]+):(.*)";
    String logLine14 = "^(\\d\\d\\d\\d-\\d\\d\\-\\d\\d) \\d\\d:\\d\\d:\\d\\d,\\d\\d\\d (\\w+)\\s+\\S+ @ ([^:]+):[^:]+:[^:]+:([^:]+):(.*)";
    valid13 = Pattern.compile(logLine13);
    valid14 = Pattern.compile(logLine14);

    // set up the pattern for validating log file names
    logRegex = Pattern.compile(fileTemplate);

    // set up the pattern for matching any of the query types
    // NOTE(review): with an empty excludeTypes list this degenerates to
    // "()", which matches the empty string at every position; dstat.cfg
    // appears expected to define at least one exclude.type - confirm
    StringBuffer typeRXString = new StringBuffer();
    typeRXString.append("(");
    for (int i = 0; i < excludeTypes.size(); i++)
    {
        if (i > 0)
        {
            typeRXString.append("|");
        }
        typeRXString.append(excludeTypes.get(i));
    }
    typeRXString.append(")");
    typeRX = Pattern.compile(typeRXString.toString());

    // set up the pattern for matching any of the words to exclude, at the
    // start, middle or end of a query string (same NOTE as above applies
    // for an empty excludeWords list)
    StringBuffer wordRXString = new StringBuffer();
    wordRXString.append("(");
    for (int i = 0; i < excludeWords.size(); i++)
    {
        if (i > 0)
        {
            wordRXString.append("|");
        }
        wordRXString.append(" " + excludeWords.get(i) + " ");
        wordRXString.append("|");
        wordRXString.append("^" + excludeWords.get(i) + " ");
        wordRXString.append("|");
        wordRXString.append(" " + excludeWords.get(i) + "$");
    }
    wordRXString.append(")");
    wordRX = Pattern.compile(wordRXString.toString());
    return;
}
/**
 * Accessor for the config file path currently in effect (either the
 * dstat.cfg default or whatever setParameters() installed).
 *
 * @return The name of the config file
 */
public static String getConfigFile()
{
    return configFile;
}
/**
 * Convenience overload: read whichever config file is currently set on
 * the class and populate the class globals from it.
 *
 * @throws IOException if the config file cannot be read
 */
public static void readConfig() throws IOException
{
    readConfig(configFile);
}
/**
 * read in the given config file and populate the class globals.  Any
 * previously accumulated aggregation state is reset first, so this method
 * may be called repeatedly.  Exits the JVM if the file cannot be opened.
 *
 * @param configFile the config file to read in
 *
 * @throws IOException if reading the opened file fails
 */
public static void readConfig(String configFile) throws IOException
{
    // reset the aggregators
    actionAggregator = new HashMap<String, Integer>();
    searchAggregator = new HashMap<String, Integer>();
    userAggregator = new HashMap<String, Integer>();
    itemAggregator = new HashMap<String, Integer>();
    archiveStats = new HashMap<String, Integer>();

    // reset the lists
    generalSummary = new ArrayList<String>();
    excludeWords = new ArrayList<String>();
    excludeTypes = new ArrayList<String>();
    excludeChars = new ArrayList<String>();
    itemTypes = new ArrayList<String>();

    // open the config file, exiting if that is not possible
    BufferedReader br = null;
    try
    {
        br = new BufferedReader(new FileReader(configFile));
    }
    catch (IOException e)
    {
        System.out.println("Failed to read config file: " + configFile);
        System.exit(0);
    }

    // read the file, making sure the reader is closed even if a read
    // fails part way through (previously the reader leaked in that case)
    try
    {
        String record = null;
        while ((record = br.readLine()) != null)
        {
            // skip comments and anything that is not a key=value line
            Matcher matchComment = comment.matcher(record);
            Matcher matchReal = real.matcher(record);
            if (matchComment.matches() || !matchReal.matches())
            {
                continue;
            }

            // lift the values out of the matcher's result groups
            String key = matchReal.group(1).trim();
            String value = matchReal.group(2).trim();

            // read the config values into our class variables (see
            // documentation for more info on config params); the keys are
            // mutually exclusive, hence the else-if chain
            if (key.equals("general.summary"))
            {
                actionAggregator.put(value, Integer.valueOf(0));
                generalSummary.add(value);
            }
            else if (key.equals("exclude.word"))
            {
                excludeWords.add(value);
            }
            else if (key.equals("exclude.type"))
            {
                excludeTypes.add(value);
            }
            else if (key.equals("exclude.character"))
            {
                excludeChars.add(value);
            }
            else if (key.equals("item.type"))
            {
                itemTypes.add(value);
            }
            else if (key.equals("item.floor"))
            {
                itemFloor = Integer.parseInt(value);
            }
            else if (key.equals("search.floor"))
            {
                searchFloor = Integer.parseInt(value);
            }
            else if (key.equals("item.lookup"))
            {
                itemLookup = Integer.parseInt(value);
            }
            else if (key.equals("user.email"))
            {
                userEmail = value;
            }
        }
    }
    finally
    {
        br.close();
    }
}
/**
 * increment the value of the given map at the given key by one, treating
 * a missing (or null) entry as zero.  The map itself is not modified;
 * the new value is returned for the caller to store back.
 *
 * @param map the map whose value we want to increase
 * @param key the key of the map whose value to increase
 *
 * @return an Integer object containing the new value
 */
public static Integer increment(Map<String, Integer> map, String key)
{
    // single lookup instead of the previous containsKey() + get() pair;
    // also avoids an NPE if a key was ever mapped to null
    Integer current = map.get(key);
    return (current == null) ? Integer.valueOf(1)
                             : Integer.valueOf(current.intValue() + 1);
}
/**
 * Take the standard date string requested at the command line and convert
 * it into a Date object.  Prints an error and exits the JVM if the date
 * does not parse.
 *
 * @param date the string representation of the date (yyyy-MM-dd)
 *
 * @return a date object containing the date, with the time set to
 *         00:00:00
 */
public static Date parseDate(String date)
{
    SimpleDateFormat format = new SimpleDateFormat("yyyy'-'MM'-'dd");
    try
    {
        return format.parse(date);
    }
    catch (ParseException e)
    {
        System.out.println("The date is not in the correct format");
        System.exit(0);
        return null; // unreachable; keeps the compiler happy
    }
}
/**
 * Take the date object and convert it into a string of the form YYYY-MM-DD
 * (the inverse of parseDate).
 *
 * @param date the date to be converted
 *
 * @return A string of the form YYYY-MM-DD
 */
public static String unParseDate(Date date)
{
    return new SimpleDateFormat("yyyy'-'MM'-'dd").format(date);
}
/**
 * Take a search query string and pull out all of the meaningful information
 * from it, giving the results in the form of a String array, a single word
 * to each element.  Requires setRegex() to have been called first, since
 * all of the patterns used here are built there.
 *
 * @param query the search query to be analysed
 *
 * @return the string array containing meaningful search terms
 */
public static String[] analyseQuery(String query)
{
    // register our standard loop counter
    int i = 0;

    // lower-case the query so matching is case-insensitive
    query = query.toLowerCase();

    // successively blank out, with spaces: the "query=" marker, collection
    // and community ids, result counts, excluded types, excluded
    // characters, excluded words and stranded single characters
    Matcher matchQuery = queryRX.matcher(query);
    query = matchQuery.replaceAll(" ");

    Matcher matchCollection = collectionRX.matcher(query);
    query = matchCollection.replaceAll(" ");

    Matcher matchCommunity = communityRX.matcher(query);
    query = matchCommunity.replaceAll(" ");

    Matcher matchResults = resultsRX.matcher(query);
    query = matchResults.replaceAll(" ");

    Matcher matchTypes = typeRX.matcher(query);
    query = matchTypes.replaceAll(" ");

    Matcher matchChars = excludeCharRX.matcher(query);
    query = matchChars.replaceAll(" ");

    Matcher matchWords = wordRX.matcher(query);
    query = matchWords.replaceAll(" ");

    Matcher single = singleRX.matcher(query);
    query = single.replaceAll(" ");

    // split the remaining string by whitespace, trim and stuff into an
    // array to be returned
    StringTokenizer st = new StringTokenizer(query);
    String[] words = new String[st.countTokens()];
    for (i = 0; i < words.length; i++)
    {
        words[i] = st.nextToken().trim();
    }

    // FIXME: some single characters are still slipping through the net;
    // why? and how do we fix it?
    return words;
}
/**
 * split the given line into its relevant segments if applicable (i.e. the
 * line matches one of the required regular expressions).
 *
 * @param line the line to be segmented
 * @return a LogLine object for the given line, or null if the line does
 *         not match either supported log format
 */
public static LogLine getLogLine(String line)
{
    // FIXME: consider moving this code into the LogLine class.  To do this
    // we need to much more carefully define the structure and behaviour
    // of the LogLine class
    Matcher match;

    // 1.4-style lines carry an extra ":ip_addr" field, so they need the
    // pattern with one more colon-separated segment
    if (line.indexOf(":ip_addr") > 0)
    {
        match = valid14.matcher(line);
    }
    else
    {
        match = valid13.matcher(line);
    }

    if (match.matches())
    {
        // set up a new log line object from the five capture groups
        // NOTE(review): group order assumed to be date, level, user,
        // action, params - confirm against the LogLine constructor
        LogLine logLine = new LogLine(parseDate(match.group(1).trim()),
                                      LogManager.unescapeLogField(match.group(2)).trim(),
                                      LogManager.unescapeLogField(match.group(3)).trim(),
                                      LogManager.unescapeLogField(match.group(4)).trim(),
                                      LogManager.unescapeLogField(match.group(5)).trim());
        return logLine;
    }
    else
    {
        return null;
    }
}
/**
 * get the number of items in the archive which were accessioned between
 * the provided start and end dates (the class-level startDate/endDate),
 * with the given value for the DC field 'type' (unqualified)
 *
 * NOTE(review): the type value and the formatted dates are concatenated
 * directly into the SQL.  They come from dstat.cfg / parsed command line
 * dates rather than end users, but this should be parameterised if the
 * DatabaseManager API allows it - confirm.
 *
 * @param context the DSpace context for the action
 * @param type value for DC field 'type' (unqualified); null for no
 *             type constraint
 *
 * @return an integer containing the relevant count
 */
public static Integer getNumItems(Context context, String type)
    throws SQLException
{
    // the SQL dialect differs between postgres and oracle, so find out
    // which we are talking to
    boolean oracle = false;
    if ("oracle".equals(ConfigurationManager.getProperty("db.name")))
    {
        oracle = true;
    }

    // FIXME: this method is clearly not optimised
    // FIXME: we don't yet collect total statistics, such as number of items
    // withdrawn, number in process of submission etc.  We should probably do
    // that

    // sub-query selecting item ids whose unqualified 'type' DC value
    // contains the requested type string
    String typeQuery = null;
    if (type != null)
    {
        typeQuery = "SELECT item_id " +
                    "FROM metadatavalue " +
                    "WHERE text_value LIKE '%" + type + "%' " +
                    "AND metadata_field_id = (" +
                    " SELECT metadata_field_id " +
                    " FROM metadatafieldregistry " +
                    " WHERE element = 'type' " +
                    " AND qualifier IS NULL) ";
    }

    // sub-query selecting item ids whose date.accessioned value falls
    // inside the analysis window
    StringBuffer dateQuery = new StringBuffer();
    if (oracle)
    {
        dateQuery.append("SELECT /*+ ORDERED_PREDICATES */ item_id ");
    }
    else
    {
        dateQuery.append("SELECT item_id ");
    }

    dateQuery.append("FROM metadatavalue " +
                     "WHERE metadata_field_id = (" +
                     " SELECT metadata_field_id " +
                     " FROM metadatafieldregistry " +
                     " WHERE element = 'date' " +
                     " AND qualifier = 'accessioned') ");

    // the stored dates are ISO strings, hence the cast/TO_TIMESTAMP dance
    if (startDate != null)
    {
        if (oracle)
        {
            dateQuery.append(" AND TO_TIMESTAMP( TO_CHAR(text_value), "+
                             "'yyyy-mm-dd\"T\"hh24:mi:ss\"Z\"' ) > TO_DATE('" +
                             unParseDate(startDate) + "', 'yyyy-MM-dd') ");
        }
        else
        {
            dateQuery.append(" AND text_value::timestamp > '" +
                             unParseDate(startDate) + "'::timestamp ");
        }
    }

    if (endDate != null)
    {
        if (oracle)
        {
            dateQuery.append(" AND TO_TIMESTAMP( TO_CHAR(text_value), "+
                             "'yyyy-mm-dd\"T\"hh24:mi:ss\"Z\"' ) < TO_DATE('" +
                             unParseDate(endDate) + "', 'yyyy-MM-dd') ");
        }
        else
        {
            dateQuery.append(" AND text_value::timestamp < '" +
                             unParseDate(endDate) + "'::timestamp ");
        }
    }

    // build the final query: count in-archive, non-withdrawn items,
    // intersected with the optional date and type sub-queries
    StringBuffer query = new StringBuffer();
    query.append("SELECT COUNT(*) AS num " +
                 "FROM item " +
                 "WHERE in_archive = " + (oracle ? "1 " : "true ") +
                 "AND withdrawn = " + (oracle ? "0 " : "false "));

    if (startDate != null || endDate != null)
    {
        query.append(" AND item_id IN ( " +
                     dateQuery.toString() + ") ");
    }

    if (type != null)
    {
        query.append(" AND item_id IN ( " +
                     typeQuery + ") ");
    }

    TableRow row = DatabaseManager.querySingle(context, query.toString());

    Integer numItems;
    if (oracle)
    {
        numItems = Integer.valueOf(row.getIntColumn("num"));
    }
    else
    {
        // for some reason the number column is of "long" data type!
        Long count = Long.valueOf(row.getLongColumn("num"));
        numItems = Integer.valueOf(count.intValue());
    }
    return numItems;
}
/**
 * get the total number of items in the archive at time of execution,
 * ignoring all other constraints
 *
 * @param context the DSpace context the action is being performed in
 *
 * @return an Integer containing the number of items in the
 *         archive
 */
public static Integer getNumItems(Context context)
    throws SQLException
{
    // delegate to the typed variant with no type constraint
    return getNumItems(context, null);
}
/**
 * print out the usage information for this class to the standard out.
 * Keep this text in sync with the options parsed in main().
 */
public static void usage()
{
    // single block of help text; each option documents its default
    String usage = "Usage Information:\n" +
                    "LogAnalyser [options [parameters]]\n" +
                    "-log [log directory]\n" +
                    "\tOptional\n" +
                    "\tSpecify a directory containing log files\n" +
                    "\tDefault uses [dspace.dir]/log from dspace.cfg\n" +
                    "-file [file name regex]\n" +
                    "\tOptional\n" +
                    "\tSpecify a regular expression as the file name template.\n" +
                    "\tCurrently this needs to be correctly escaped for Java string handling (FIXME)\n" +
                    "\tDefault uses dspace.log*\n" +
                    "-cfg [config file path]\n" +
                    "\tOptional\n" +
                    "\tSpecify a config file to be used\n" +
                    "\tDefault uses dstat.cfg in dspace config directory\n" +
                    "-out [output file path]\n" +
                    "\tOptional\n" +
                    "\tSpecify an output file to write results into\n" +
                    "\tDefault uses dstat.dat in dspace log directory\n" +
                    "-start [YYYY-MM-DD]\n" +
                    "\tOptional\n" +
                    "\tSpecify the start date of the analysis\n" +
                    "\tIf a start date is specified then no attempt to gather \n" +
                    "\tcurrent database statistics will be made unless -lookup is\n" +
                    "\talso passed\n" +
                    "\tDefault is to start from the earliest date records exist for\n" +
                    "-end [YYYY-MM-DD]\n" +
                    "\tOptional\n" +
                    "\tSpecify the end date of the analysis\n" +
                    "\tIf an end date is specified then no attempt to gather \n" +
                    "\tcurrent database statistics will be made unless -lookup is\n" +
                    "\talso passed\n" +
                    "\tDefault is to work up to the last date records exist for\n" +
                    "-lookup\n" +
                    "\tOptional\n" +
                    "\tForce a lookup of the current database statistics\n" +
                    "\tOnly needs to be used if date constraints are also in place\n" +
                    "-help\n" +
                    "\tdisplay this usage information\n";

    System.out.println(usage);
}
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.statistics;
import java.text.NumberFormat;
/**
 * This class provides a number of tools that may be useful to the methods
 * which generate the different types of report
 *
 * @author Richard Jones
 */
public class ReportTools
{
    /**
     * Render the given integer as a display string via a standard
     * NumberFormat integer instance, sparing callers the hassle of
     * instantiating and localising one themselves.
     *
     * @param number the integer to be formatted
     *
     * @return a string containing the formatted number
     */
    public static String numberFormat(int number)
    {
        // the double cast is lossless for every int value and preserves
        // the original formatting path
        return NumberFormat.getIntegerInstance().format((double) number);
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.statistics;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.sql.SQLException;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Calendar;
import java.util.Date;
import java.util.GregorianCalendar;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.StringTokenizer;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.dspace.content.DCValue;
import org.dspace.content.Item;
import org.dspace.core.ConfigurationManager;
import org.dspace.core.Context;
import org.dspace.handle.HandleManager;
/**
* This class performs the action of coordinating a usage report being
* generated using the standard internal aggregation file format as a basis.
 * All its configuration information must come from that file. There is the
* opportunity for different output format options such as HTML.
*
* Use the -help flag for more information
*
* @author Richard Jones
*/
public class ReportGenerator
{
// set up our class globals

/////////////////
// aggregators
/////////////////

/** aggregator for all actions performed in the system */
private static Map<String, String> actionAggregator;

/** aggregator for all searches performed */
private static Map<String, String> searchAggregator;

/** aggregator for user logins */
private static Map<String, String> userAggregator;

/** aggregator for item views */
private static Map<String, String> itemAggregator;

/** aggregator for current archive state statistics */
private static Map<String, String> archiveStats;

//////////////////
// statistics config data
//////////////////

/** bottom limit to output for search word analysis */
private static int searchFloor;

/** bottom limit to output for item view analysis */
private static int itemFloor;

/** number of items from most popular to be looked up in the database */
private static int itemLookup;

/** mode to use for user email display (mirrors LogAnalyser's
    on/off/alias setting - confirm) */
private static String userEmail;

/** URL of the service being analysed */
private static String url;

/** Name of the service being analysed */
private static String name;

/** average number of views per item */
private static int avgItemViews;

/** name of the server being analysed */
private static String serverName;

/** start date of this report; null until populated */
private static Date startDate = null;

/** end date of this report; null until populated */
private static Date endDate = null;

/** the time taken to build the aggregation file from the log */
private static int processTime;

/** the number of log lines analysed */
private static int logLines;

/** the number of warnings encountered */
private static int warnings;

/** the list of results to be displayed in the general summary */
private static List<String> generalSummary;

//////////////////
// regular expressions
//////////////////

/** pattern that matches a "key=value" line in the aggregation file */
private static Pattern real = Pattern.compile("^(.+)=(.+)");

//////////////////////////
// Miscellaneous variables
//////////////////////////

/** process timing clock */
private static Calendar startTime = null;

/** a map from log file action to human readable action */
private static Map<String, String> actionMap = null;

/////////////////
// report generator config data
////////////////

/** the input file to build the report from */
private static String input = null;

/** the log file action to human readable action map; defaults to
    [dspace.dir]/config/dstat.map */
private static String map = ConfigurationManager.getProperty("dspace.dir") +
                        File.separator + "config" + File.separator + "dstat.map";
/**
* main method to be run from command line. See usage information for
* details as to how to use the command line flags
*/
public static void main(String [] argv)
throws Exception, SQLException
{
// create context as super user
Context context = new Context();
context.setIgnoreAuthorization(true);
String myFormat = null;
String myInput = null;
String myOutput = null;
String myMap = null;
// read in our command line options
for (int i = 0; i < argv.length; i++)
{
if (argv[i].equals("-format"))
{
myFormat = argv[i+1].toLowerCase();
}
if (argv[i].equals("-in"))
{
myInput = argv[i+1];
}
if (argv[i].equals("-out"))
{
myOutput = argv[i+1];
}
if (argv[i].equals("-map"))
{
myMap = argv[i+1];
}
if (argv[i].equals("-help"))
{
usage();
System.exit(0);
}
}
processReport(context, myFormat, myInput, myOutput, myMap);
}
/**
* using the pre-configuration information passed here, read in the
* aggregation data and output a file containing the report in the
* requested format
*
* this method is retained for backwards compatibility, but delegates the actual
 * work to a new method
*
* @param context the DSpace context in which this action is performed
 * @param myFormat the desired output format (currently only HTML is supported)
* @param myInput the aggregation file to be turned into a report
* @param myOutput the file into which to write the report
*/
public static void processReport(Context context, String myFormat,
String myInput, String myOutput,
String myMap)
throws Exception, SQLException
{
if (myMap != null)
{
map = myMap;
}
// create the relevant report type
// FIXME: at the moment we only support HTML report generation
Report report = null;
if (myFormat.equals("html"))
{
report = new HTMLReport();
((HTMLReport)report).setOutput(myOutput);
}
if (report == null)
{
throw new IllegalStateException("Must specify a valid report format");
}
ReportGenerator.processReport(context, report, myInput);
}
    /**
     * Using the pre-configuration information passed here, read in the
     * aggregation data and output a file containing the report in the
     * requested format.
     *
     * @param context the DSpace context in which this action is performed
     * @param report  the report implementation that formats and writes the output
     * @param myInput the aggregation file to be turned into a report
     * @throws Exception if reading the aggregation data or rendering fails
     * @throws SQLException if the item lookups against the database fail
     */
    public static void processReport(Context context, Report report,
                                     String myInput)
        throws Exception, SQLException
    {
        // start the clock for the "Output Processing Time" stat reported below
        startTime = new GregorianCalendar();

        /** instantiate aggregators */
        actionAggregator = new HashMap<String, String>();
        searchAggregator = new HashMap<String, String>();
        userAggregator = new HashMap<String, String>();
        itemAggregator = new HashMap<String, String>();
        archiveStats = new HashMap<String, String>();
        actionMap = new HashMap<String, String>();

        /** instantiate lists */
        generalSummary = new ArrayList<String>();

        // set the parameters for this analysis
        setParameters(myInput);

        // read the input file
        readInput(input);

        // load the log file action to human readable action map
        readMap(map);

        report.setStartDate(startDate);
        report.setEndDate(endDate);
        report.setMainTitle(name, serverName);

        // define our standard variables for re-use
        // FIXME: we probably don't need these once we've finished re-factoring
        Iterator<String> keys = null;
        int i = 0;
        String explanation = null;
        int value;

        // FIXME: All of these sections should probably be buried in their own
        // custom methods

        // general overview: only show summary entries that were actually
        // aggregated from the log
        Statistics overview = new Statistics();
        overview.setSectionHeader("General Overview");

        Iterator<String> summaryEntries = generalSummary.iterator();
        while (summaryEntries.hasNext())
        {
            String entry = summaryEntries.next();
            if (actionAggregator.containsKey(entry))
            {
                int count = Integer.parseInt(actionAggregator.get(entry));
                overview.add(new Stat(translate(entry), count));
            }
        }

        report.addBlock(overview);

        // prepare the archive statistics package
        if (archiveStats.size() > 0)
        {
            Statistics archiveInfo = prepareStats(archiveStats, true, false);
            archiveInfo.setSectionHeader("Archive Information");
            archiveInfo.setStatName("Content Type");
            archiveInfo.setResultName("Number of items");

            report.addBlock(archiveInfo);
        }

        // process the items in preparation to be displayed. This includes sorting
        // by view number, building the links, and getting further info where
        // necessary
        Statistics viewedItems = new Statistics("Item/Handle", "Number of views", itemFloor);
        viewedItems.setSectionHeader("Items Viewed");

        Stat[] items = new Stat[itemAggregator.size()];

        keys = itemAggregator.keySet().iterator();
        i = 0;
        while (keys.hasNext())
        {
            String key = keys.next();
            // the stat links through to the item's handle page
            String link = url + "handle/" + key;
            value = Integer.parseInt(itemAggregator.get(key));
            items[i] = new Stat(key, value, link);
            i++;
        }

        // sort so that the most-viewed items come first, then only look up
        // database details for the top itemLookup entries
        Arrays.sort(items);

        String info = null;
        for (i = 0; i < items.length; i++)
        {
            // Allow negative value to say that all items should be looked up
            if (itemLookup < 0 || i < itemLookup)
            {
                info = getItemInfo(context, items[i].getKey());
            }

            // if we get something back from the db then set it as the key,
            // else just use the link
            if (info != null)
            {
                items[i].setKey(info + " (" + items[i].getKey() + ")");
            }
            else
            {
                items[i].setKey(items[i].getReference());
            }

            // reset the info register
            info = null;
        }

        viewedItems.add(items);

        report.addBlock(viewedItems);

        // prepare a report of the full action statistics
        Statistics fullInfo = prepareStats(actionAggregator, true, true);
        fullInfo.setSectionHeader("All Actions Performed");
        fullInfo.setStatName("Action");
        fullInfo.setResultName("Number of times");

        report.addBlock(fullInfo);

        // prepare the user login statistics package (suppressed entirely when
        // the aggregation file sets user_email=off)
        if (!userEmail.equals("off"))
        {
            Statistics userLogins = prepareStats(userAggregator, true, false);
            userLogins.setSectionHeader("User Logins");
            userLogins.setStatName("User");
            userLogins.setResultName("Number of logins");
            if (userEmail.equals("alias"))
            {
                explanation = "(distinct addresses)";
                userLogins.setExplanation(explanation);
            }

            report.addBlock(userLogins);
        }

        // prepare the search word statistics package
        Statistics searchWords = prepareStats(searchAggregator, true, false);
        searchWords.setSectionHeader("Words Searched");
        searchWords.setStatName("Word");
        searchWords.setResultName("Number of searches");
        searchWords.setFloor(searchFloor);

        report.addBlock(searchWords);

        // FIXME: because this isn't an aggregator it can't be passed to
        // prepareStats; should we overload this method for use with this kind
        // of data?
        // prepare the average item views statistics
        if (avgItemViews > 0)
        {
            Statistics avg = new Statistics();
            avg.setSectionHeader("Averaging Information");
            Stat[] average = new Stat[1];
            average[0] = new Stat("Average views per item", avgItemViews);
            avg.add(average);
            report.addBlock(avg);
        }

        // prepare the log line level statistics
        // FIXME: at the moment we only know about warnings, but future versions
        // should aggregate all log line levels and display here
        Statistics levels = new Statistics("Level", "Number of lines");
        levels.setSectionHeader("Log Level Information");
        Stat[] level = new Stat[1];
        level[0] = new Stat("Warnings", warnings);
        levels.add(level);

        report.addBlock(levels);

        // get the display processing time information
        Calendar endTime = new GregorianCalendar();
        long timeInMillis = (endTime.getTimeInMillis() - startTime.getTimeInMillis());
        int outputProcessTime = (int) (timeInMillis / 1000);

        // prepare the processing information statistics
        Statistics process = new Statistics("Operation", "");
        process.setSectionHeader("Processing Information");
        Stat[] proc = new Stat[3];
        proc[0] = new Stat("Log Processing Time", processTime);
        proc[0].setUnits("seconds");
        proc[1] = new Stat("Output Processing Time", outputProcessTime);
        proc[1].setUnits("seconds");
        proc[2] = new Stat("Log File Lines Analysed", logLines);
        proc[2].setUnits("lines");
        process.add(proc);

        report.addBlock(process);

        report.render();

        return;
    }
/**
* a standard stats block preparation method for use when an aggregator
* has to be put out in its entirity. This method will not be able to
* deal with complex cases, although it will perform sorting by value and
* translations as per the map file if requested
*
* @param aggregator the aggregator that should be converted
* @param sort should the resulting stats be sorted by value
* @param translate translate the stat name using the map file
*
* @return a Statistics object containing all the relevant information
*/
public static Statistics prepareStats(Map<String, String> aggregator, boolean sort, boolean translate)
{
Stat[] stats = new Stat[aggregator.size()];
if (aggregator.size() > 0)
{
int i = 0;
for (Map.Entry<String, String> aggregatorEntry : aggregator.entrySet())
{
String key = aggregatorEntry.getKey();
int value = Integer.parseInt(aggregatorEntry.getValue());
if (translate)
{
stats[i] = new Stat(translate(key), value);
}
else
{
stats[i] = new Stat(key, value);
}
i++;
}
if (sort)
{
Arrays.sort(stats);
}
}
// add the results to the statistics object
Statistics statistics = new Statistics();
statistics.add(stats);
return statistics;
}
/**
* look the given text up in the action map table and return a translated
* value if one exists. If no translation exists the original text is
* returned
*
* @param text the text to be translated
*
* @return a string containing either the translated text or the original
* text
*/
public static String translate(String text)
{
if (actionMap.containsKey(text))
{
return actionMap.get(text);
}
else
{
return text;
}
}
/**
* read in the action map file which converts log file line actions into
* actions which are more understandable to humans
*
* @param map the map file
*/
public static void readMap(String map)
throws IOException
{
FileReader fr = null;
BufferedReader br = null;
try
{
// read in the map file, printing a warning if none is found
String record = null;
try
{
fr = new FileReader(map);
br = new BufferedReader(fr);
}
catch (IOException e)
{
System.err.println("Failed to read map file: log file actions will be displayed without translation");
return;
}
// loop through the map file and read in the values
while ((record = br.readLine()) != null)
{
Matcher matchReal = real.matcher(record);
// if the line is real then read it in
if (matchReal.matches())
{
actionMap.put(matchReal.group(1).trim(), matchReal.group(2).trim());
}
}
}
finally
{
if (br != null)
{
try
{
br.close();
}
catch (IOException ioe)
{
}
}
if (fr != null)
{
try
{
fr.close();
}
catch (IOException ioe)
{
}
}
}
}
/**
* set the passed parameters up as global class variables. This has to
* be done in a separate method because the API permits for running from
* the command line with args or calling the processReport method statically
* from elsewhere
*
* @param myInput regex for log file names
*/
public static void setParameters(String myInput)
{
if (myInput != null)
{
input = myInput;
}
return;
}
/**
* read the input file and populate all the class globals with the contents
* The values that come from this file form the basis of the analysis report
*
* @param input the aggregator file
*/
public static void readInput(String input)
throws IOException, ParseException
{
FileReader fr = null;
BufferedReader br = null;
// read in the analysis information, throwing an error if we fail to open
// the given file
String record = null;
try
{
fr = new FileReader(input);
br = new BufferedReader(fr);
}
catch (IOException e)
{
System.out.println("Failed to read input file: " + input);
return;
}
// first initialise a date format object to do our date processing
// if necessary
SimpleDateFormat sdf = new SimpleDateFormat("dd'/'MM'/'yyyy");
// FIXME: although this works, it is not very elegant
// loop through the aggregator file and read in the values
while ((record = br.readLine()) != null)
{
// match real lines
Matcher matchReal = real.matcher(record);
// pre-prepare our input strings
String section = null;
String key = null;
String value = null;
// temporary string to hold the left hand side of the equation
String left = null;
// match the line or skip this record
if (matchReal.matches())
{
// lift the values out of the matcher's result groups
left = matchReal.group(1).trim();
value = matchReal.group(2).trim();
// now analyse the left hand side, splitting by ".", taking the
// first token as the section and the remainder of the string
// as they key if it exists
StringTokenizer tokens = new StringTokenizer(left, ".");
int numTokens = tokens.countTokens();
if (tokens.hasMoreTokens())
{
section = tokens.nextToken();
if (numTokens > 1)
{
key = left.substring(section.length() + 1);
}
else
{
key = "";
}
}
}
else
{
continue;
}
// if the line is real, then we carry on
// read the analysis contents in
if ("archive".equals(section))
{
archiveStats.put(key, value);
}
else if ("action".equals(section))
{
actionAggregator.put(key, value);
}
else if ("user".equals(section))
{
userAggregator.put(key, value);
}
else if ("search".equals(section))
{
searchAggregator.put(key, value);
}
else if ("item".equals(section))
{
itemAggregator.put(key, value);
}
else if ("user_email".equals(section))
{
userEmail = value;
}
else if ("item_floor".equals(section))
{
itemFloor = Integer.parseInt(value);
}
else if ("search_floor".equals(section))
{
searchFloor = Integer.parseInt(value);
}
else if ("host_url".equals(section))
{
url = value;
}
else if ("item_lookup".equals(section))
{
itemLookup = Integer.parseInt(value);
}
else if ("avg_item_views".equals(section))
{
try
{
avgItemViews = Integer.parseInt(value);
}
catch (NumberFormatException e)
{
avgItemViews = 0;
}
}
else if ("server_name".equals(section))
{
serverName = value;
}
else if ("service_name".equals(section))
{
name = value;
}
else if ("start_date".equals(section))
{
startDate = sdf.parse(value);
}
else if ("end_date".equals(section))
{
endDate = sdf.parse(value);
}
else if ("analysis_process_time".equals(section))
{
processTime = Integer.parseInt(value);
}
else if ("general_summary".equals(section))
{
generalSummary.add(value);
}
else if ("log_lines".equals(section))
{
logLines = Integer.parseInt(value);
}
else if ("warnings".equals(section))
{
warnings = Integer.parseInt(value);
}
}
// close the inputs
br.close();
fr.close();
}
/**
* get the information for the item with the given handle
*
* @param context the DSpace context we are operating under
* @param handle the handle of the item being looked up, in the form
* 1234/567 and so forth
*
* @return a string containing a reference (almost citation) to the
* article
*/
public static String getItemInfo(Context context, String handle)
throws SQLException
{
Item item = null;
// ensure that the handle exists
try
{
item = (Item) HandleManager.resolveToObject(context, handle);
}
catch (Exception e)
{
return null;
}
// if no handle that matches is found then also return null
if (item == null)
{
return null;
}
// build the referece
// FIXME: here we have blurred the line between content and presentation
// and it should probably be un-blurred
DCValue[] title = item.getDC("title", null, Item.ANY);
DCValue[] author = item.getDC("contributor", "author", Item.ANY);
StringBuffer authors = new StringBuffer();
if (author.length > 0)
{
authors.append("(" + author[0].value);
}
if (author.length > 1)
{
authors.append(" et al");
}
if (author.length > 0)
{
authors.append(")");
}
String content = title[0].value + " " + authors.toString();
return content;
}
/**
* output the usage information to the terminal
*/
public static void usage()
{
String usage = "Usage Information:\n" +
"ReportGenerator [options [parameters]]\n" +
"-format [output format]\n" +
"\tRequired\n" +
"\tSpecify the format that you would like the output in\n" +
"\tOptions:\n" +
"\t\thtml\n" +
"-in [aggregation file]\n" +
"\tRequired\n" +
"\tSpecify the aggregation data file to display\n" +
"-out [output file]\n" +
"\tOptional\n" +
"\tSpecify the file to output the report to\n" +
"\tDefault uses [dspace log directory]/report\n" +
"-map [map file]\n" +
"\tOptional\n" +
"\tSpecify the map file to translate log file actions into human readable actions\n" +
"\tDefault uses [dspace config directory]/dstat.map\n" +
"-help\n" +
"\tdisplay this usage information\n";
System.out.println(usage);
}
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.statistics;
import org.dspace.app.statistics.Statistics;
import java.util.Date;
/**
 * An interface to a generic report generating
* class, and to provide the polymorphism necessary to allow the report
* generator to generate any number of different formats of report
*
* Note: This used to be an abstract class, but has been made an interface as there wasn't
* any logic contained within it. It's also been made public, so that you can create a Report
* type without monkeying about in the statistics package.
*
* @author Richard Jones
*/
public interface Report
{
    /**
     * output any top headers that this page needs
     *
     * @return a string containing the header for the report
     */
    String header();

    /**
     * output any top headers that this page needs
     *
     * @param title the title of the report, useful for email subjects or
     *              HTML headers
     *
     * @return a string containing the header for the report
     */
    String header(String title);

    /**
     * output the title in the relevant format. This requires that the title
     * has been set with setMainTitle()
     *
     * @return a string containing the title of the report
     */
    String mainTitle();

    /**
     * output the date range in the relevant format. This requires that the
     * date ranges have been set using setStartDate() and setEndDate()
     *
     * @return a string containing date range information
     */
    String dateRange();

    /**
     * output the section header in the relevant format
     *
     * @param title the title of the current section header
     *
     * @return a string containing the formatted section header
     */
    String sectionHeader(String title);

    /**
     * output the report block based on the passed statistics object array
     *
     * @param content a statistics object to form the basis of the displayed
     *                stat block
     *
     * @return a string containing the formatted statistics block
     */
    String statBlock(Statistics content);

    /**
     * output the floor information in the relevant format
     *
     * @param floor the floor value for the statistics block
     *
     * @return a string containing the formatted floor information
     */
    String floorInfo(int floor);

    /**
     * output the explanation of the stat block in the relevant format
     *
     * @param explanation the explanatory or clarification text for the stats
     *
     * @return a string containing the formatted explanation
     */
    String blockExplanation(String explanation);

    /**
     * output the final footers for this file
     *
     * @return a string containing the report footer
     */
    String footer();

    /**
     * set the main title for the report
     *
     * @param name the name of the service
     * @param serverName the name of the server
     */
    void setMainTitle(String name, String serverName);

    /**
     * add a statistics block to the report to the class register
     *
     * @param stat the statistics object to be added to the report
     */
    void addBlock(Statistics stat);

    /**
     * render the report
     *
     * @return a string containing the full content of the report
     */
    String render();

    /**
     * set the starting date for the report
     *
     * @param start the start date for the report
     */
    void setStartDate(Date start);

    /**
     * set the end date for the report
     *
     * @param end the end date for the report
     */
    void setEndDate(Date end);
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.statistics;
import org.apache.commons.lang.time.DateUtils;
import org.dspace.core.ConfigurationManager;
import java.util.*;
import java.util.regex.Pattern;
import java.util.regex.Matcher;
import java.io.File;
import java.io.FilenameFilter;
import java.text.SimpleDateFormat;
import java.text.ParseException;
/**
* Helper class for loading the analysis / report files from the reports directory
*/
public class StatisticsLoader
{
    /** cache of monthly analysis files, keyed by the date string from the filename (yyyy-M) */
    private static Map<String, StatsFile> monthlyAnalysis = null;
    /** cache of monthly report files, keyed by the date string from the filename (yyyy-M) */
    private static Map<String, StatsFile> monthlyReports = null;

    /** most recent general analysis file found in the reports directory */
    private static StatsFile generalAnalysis = null;
    /** most recent general report file found in the reports directory */
    private static StatsFile generalReport = null;

    /** when the cache was last rebuilt; null until the first load */
    private static Date lastLoaded = null;
    /** number of files seen when the cache was built, used to detect additions/removals */
    private static int fileCount = 0;

    /** filename patterns for each file type; group(1) captures the date portion */
    private static Pattern analysisMonthlyPattern;
    private static Pattern analysisGeneralPattern;
    private static Pattern reportMonthlyPattern;
    private static Pattern reportGeneralPattern;

    // NOTE(review): SimpleDateFormat is not thread-safe, and these shared
    // instances are used from unsynchronized code paths - confirm callers
    // are single-threaded or synchronize access
    private static SimpleDateFormat monthlySDF;
    private static SimpleDateFormat generalSDF;

    // one time initialisation of the regex patterns and formatters we will use
    static
    {
        analysisMonthlyPattern = Pattern.compile("dspace-log-monthly-([0-9][0-9][0-9][0-9]-[0-9]+)\\.dat");
        analysisGeneralPattern = Pattern.compile("dspace-log-general-([0-9]+-[0-9]+-[0-9]+)\\.dat");
        reportMonthlyPattern = Pattern.compile("report-([0-9][0-9][0-9][0-9]-[0-9]+)\\.html");
        reportGeneralPattern = Pattern.compile("report-general-([0-9]+-[0-9]+-[0-9]+)\\.html");

        monthlySDF = new SimpleDateFormat("yyyy'-'M");
        generalSDF = new SimpleDateFormat("yyyy'-'M'-'dd");
    }
/**
* Get an array of the dates of the report files
* @return
*/
public static Date[] getMonthlyReportDates()
{
return sortDatesDescending(getDatesFromMap(monthlyReports));
}
/**
* Get an array of the dates of the analysis files
* @return
*/
public static Date[] getMonthlyAnalysisDates()
{
return sortDatesDescending(getDatesFromMap(monthlyAnalysis));
}
    /**
     * Convert the formatted dates that are the keys of the map into a date array.
     *
     * @param monthlyMap map whose keys are dates formatted as yyyy-M
     * @return one Date per key; a slot is left null when its key cannot be
     *         parsed (sortDatesDescending tolerates null entries)
     */
    protected static Date[] getDatesFromMap(Map<String, StatsFile> monthlyMap)
    {
        Set<String> keys = monthlyMap.keySet();
        Date[] dates = new Date[keys.size()];
        int i = 0;
        for (String date : keys)
        {
            try
            {
                dates[i] = monthlySDF.parse(date);
            }
            catch (ParseException pe)
            {
                // unparseable key: leave this slot null rather than aborting
            }
            i++;
        }
        return dates;
    }
/**
* Sort the date array in descending (reverse chronological) order
* @param dates
* @return
*/
protected static Date[] sortDatesDescending(Date[] dates)
{
Arrays.sort(dates, new Comparator<Date>() {
public int compare(Date d1, Date d2)
{
if (d1 == null && d2 == null)
{
return 0;
}
else if (d1 == null)
{
return -1;
}
else if (d2 == null)
{
return 1;
}
else if (d1.before(d2))
{
return 1;
}
else if (d2.before(d1))
{
return -1;
}
return 0;
}
});
return dates;
}
/**
* Get the analysis file for a given date
* @param date
* @return
*/
public static File getAnalysisFor(String date)
{
StatisticsLoader.syncFileList();
StatsFile sf = (monthlyAnalysis == null ? null : monthlyAnalysis.get(date));
return sf == null ? null : sf.file;
}
/**
* Get the report file for a given date
* @param date
* @return
*/
public static File getReportFor(String date)
{
StatisticsLoader.syncFileList();
StatsFile sf = (monthlyReports == null ? null : monthlyReports.get(date));
return sf == null ? null : sf.file;
}
/**
* Get the current general analysis file
* @return
*/
public static File getGeneralAnalysis()
{
StatisticsLoader.syncFileList();
return generalAnalysis == null ? null : generalAnalysis.file;
}
/**
* Get the current general report file
* @return
*/
public static File getGeneralReport()
{
StatisticsLoader.syncFileList();
return generalReport == null ? null : generalReport.file;
}
/**
* Synchronize the cached list of analysis / report files with the reports directory
*
* We synchronize if:
*
* 1) The number of files is different (ie. files have been added or removed)
* 2) We haven't cached anything yet
* 3) The cache was last generate over an hour ago
*/
private static void syncFileList()
{
// Get an array of all the analysis and report files present
File[] fileList = StatisticsLoader.getAnalysisAndReportFileList();
if (fileList != null && fileList.length != fileCount)
{
StatisticsLoader.loadFileList(fileList);
}
else if (lastLoaded == null)
{
StatisticsLoader.loadFileList(fileList);
}
else if (DateUtils.addHours(lastLoaded, 1).before(new Date()))
{
StatisticsLoader.loadFileList(fileList);
}
}
/**
* Generate the cached file list from the array of files
* @param fileList
*/
private static synchronized void loadFileList(File[] fileList)
{
// If we haven't been passed an array of files, get one now
if (fileList == null || fileList.length == 0)
{
fileList = StatisticsLoader.getAnalysisAndReportFileList();
}
// Create new maps for the monthly analysis / reports
Map<String, StatsFile> newMonthlyAnalysis = new HashMap<String, StatsFile>();
Map<String, StatsFile> newMonthlyReports = new HashMap<String, StatsFile>();
StatsFile newGeneralAnalysis = null;
StatsFile newGeneralReport = null;
if (fileList != null)
{
for (File thisFile : fileList)
{
StatsFile statsFile = null;
// If we haven't identified this file yet
if (statsFile == null)
{
// See if it is a monthly analysis file
statsFile = makeStatsFile(thisFile, analysisMonthlyPattern, monthlySDF);
if (statsFile != null)
{
// If it is, add it to the map
newMonthlyAnalysis.put(statsFile.dateStr, statsFile);
}
}
// If we haven't identified this file yet
if (statsFile == null)
{
// See if it is a monthly report file
statsFile = makeStatsFile(thisFile, reportMonthlyPattern, monthlySDF);
if (statsFile != null)
{
// If it is, add it to the map
newMonthlyReports.put(statsFile.dateStr, statsFile);
}
}
// If we haven't identified this file yet
if (statsFile == null)
{
// See if it is a general analysis file
statsFile = makeStatsFile(thisFile, analysisGeneralPattern, generalSDF);
if (statsFile != null)
{
// If it is, ensure that we are pointing to the most recent file
if (newGeneralAnalysis == null || statsFile.date.after(newGeneralAnalysis.date))
{
newGeneralAnalysis = statsFile;
}
}
}
// If we haven't identified this file yet
if (statsFile == null)
{
// See if it is a general report file
statsFile = makeStatsFile(thisFile, reportGeneralPattern, generalSDF);
if (statsFile != null)
{
// If it is, ensure that we are pointing to the most recent file
if (newGeneralReport == null || statsFile.date.after(newGeneralReport.date))
{
newGeneralReport = statsFile;
}
}
}
}
}
// Store the newly discovered values in the member cache
monthlyAnalysis = newMonthlyAnalysis;
monthlyReports = newMonthlyReports;
generalAnalysis = newGeneralAnalysis;
generalReport = newGeneralReport;
lastLoaded = new Date();
}
    /**
     * Generate a StatsFile entry for this file. The pattern and date formatters are used to
     * identify the file as a particular type, and extract the relevant information.
     * If the file is not identified by the formatter provided, then we return null
     *
     * @param thisFile the candidate file from the reports directory
     * @param thisPattern the filename pattern for one file type; group(1) captures the date
     * @param sdf the formatter matching the date layout embedded in the filename
     * @return a populated StatsFile, or null if the filename does not match the pattern
     */
    private static StatsFile makeStatsFile(File thisFile, Pattern thisPattern, SimpleDateFormat sdf)
    {
        Matcher matcher = thisPattern.matcher(thisFile.getName());
        if (matcher.matches())
        {
            StatsFile sf = new StatsFile();
            sf.file = thisFile;
            sf.path = thisFile.getPath();
            sf.dateStr = matcher.group(1).trim();
            try
            {
                sf.date = sdf.parse(sf.dateStr);
            }
            catch (ParseException e)
            {
                // NOTE(review): a parse failure leaves sf.date null, which
                // loadFileList dereferences (date.after) for general files -
                // confirm parsing cannot fail for names the patterns accept
            }
            return sf;
        }
        return null;
    }
/**
* Get an array of all the analysis and report files
* @return
*/
private static File[] getAnalysisAndReportFileList()
{
File reportDir = new File(ConfigurationManager.getProperty("log.dir"));
if (reportDir != null)
{
return reportDir.listFiles(new AnalysisAndReportFilter());
}
return null;
}
    /**
     * Simple class for holding information about an analysis/report file
     */
    private static class StatsFile
    {
        /** the file on disk */
        File file;
        /** the file's path, as returned by File.getPath() */
        String path;
        /** the date parsed from the filename; may be null if parsing failed */
        Date date;
        /** the raw date string extracted from the filename */
        String dateStr;
    }
/**
* Filter used to restrict files in the reports directory to just analysis or report types
*/
private static class AnalysisAndReportFilter implements FilenameFilter
{
public boolean accept(File dir, String name)
{
if (analysisMonthlyPattern.matcher(name).matches())
{
return true;
}
if (analysisGeneralPattern.matcher(name).matches())
{
return true;
}
if (reportMonthlyPattern.matcher(name).matches())
{
return true;
}
if (reportGeneralPattern.matcher(name).matches())
{
return true;
}
return false;
}
}
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
/**
* <p>Tools for exporting and importing DSpace objects (Community, Collection,
* Item, etc.) wrapped in various kinds of packaging.</p>
*
* @see org.dspace.content.packager
*/
package org.dspace.app.packager;
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.packager;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
import java.sql.SQLException;
import java.util.List;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.PosixParser;
import org.dspace.authorize.AuthorizeException;
import org.dspace.content.DSpaceObject;
import org.dspace.content.crosswalk.CrosswalkException;
import org.dspace.content.packager.PackageDisseminator;
import org.dspace.content.packager.PackageException;
import org.dspace.content.packager.PackageParameters;
import org.dspace.content.packager.PackageIngester;
import org.dspace.core.Constants;
import org.dspace.core.Context;
import org.dspace.core.PluginManager;
import org.dspace.eperson.EPerson;
import org.dspace.handle.HandleManager;
/**
* Command-line interface to the Packager plugin.
* <p>
* This class ONLY exists to provide a CLI for the packager plugins. It does not
* "manage" the plugins and it is not called from within DSpace, but the name
* follows a DSpace convention.
* <p>
* It can invoke one of the Submission (SIP) packagers to create a new DSpace
* Item out of a package, or a Dissemination (DIP) packager to write an Item out
* as a package.
* <p>
* Usage is as follows:<br>
* (Add the -h option to get the command to show its own help)
*
* <pre>
* 1. To submit a SIP (submissions tend to create a *new* object, with a new handle. If you want to restore an object, see -r option below)
* dspace packager
* -e {ePerson}
* -t {PackagerType}
* -p {parent-handle} [ -p {parent2} ...]
* [-o {name}={value} [ -o {name}={value} ..]]
* [-a] --- also recursively ingest all child packages of the initial package
* (child pkgs must be referenced from parent pkg)
* [-w] --- skip Workflow
* {package-filename}
*
* {PackagerType} must match one of the aliases of the chosen Packager
* plugin.
*
* The "-w" option circumvents Workflow, and is optional. The "-o"
* option, which may be repeated, passes options to the packager
* (e.g. "metadataOnly" to a DIP packager).
*
* 2. To restore an AIP (similar to submit mode, but attempts to restore with the handles/parents specified in AIP):
* dspace packager
* -r --- restores a object from a package info, including the specified handle (will throw an error if handle is already in use)
* -e {ePerson}
* -t {PackagerType}
* [-o {name}={value} [ -o {name}={value} ..]]
* [-a] --- also recursively restore all child packages of the initial package
* (child pkgs must be referenced from parent pkg)
* [-k] --- Skip over errors where objects already exist and Keep Existing objects by default.
* Use with -r to only restore objects which do not already exist. By default, -r will throw an error
* and rollback all changes when an object is found that already exists.
* [-f] --- Force a restore (even if object already exists).
* Use with -r to replace an existing object with one from a package (essentially a delete and restore).
* By default, -r will throw an error and rollback all changes when an object is found that already exists.
* [-i {identifier-handle-of-object}] -- Optional when -f is specified. When replacing an object, you can specify the
* object to replace if it cannot be easily determined from the package itself.
* {package-filename}
*
* Restoring is very similar to submitting, except that you are recreating pre-existing objects. So, in a restore, the object(s) are
* being recreated based on the details in the AIP. This means that the object is recreated with the same handle and same parent/children
* objects. Not all {PackagerTypes} may support a "restore".
*
* 3. To write out a DIP:
* dspace packager
* -d
* -e {ePerson}
* -t {PackagerType}
* -i {identifier-handle-of-object}
* [-a] --- also recursively disseminate all child objects of this object
* [-o {name}={value} [ -o {name}={value} ..]]
* {package-filename}
*
* The "-d" switch chooses a Dissemination packager, and is required.
* The "-o" option, which may be repeated, passes options to the packager
* (e.g. "metadataOnly" to a DIP packager).
* </pre>
*
* Note that {package-filename} may be "-" for standard input or standard
* output, respectively.
*
* @author Larry Stone
* @author Tim Donohue
* @version $Revision: 5844 $
*/
public class Packager
{
    /* Various private global settings/options */

    /** Plugin name of the packager type to use (from -t), e.g. "AIP" or "METS". */
    private String packageType = null;

    /** True for submit/ingest mode (the default); set false when -d (disseminate) is given. */
    private boolean submit = true;

    /** When false (-u flag), all [y/n] prompts are skipped and answered "y" automatically. */
    private boolean userInteractionEnabled = true;

    // die from illegal command line
    /**
     * Print an error message (plus a pointer to the -h help flag) to stdout
     * and terminate the JVM with exit status 1.  This method never returns.
     *
     * @param msg the error message to display
     */
    private static void usageError(String msg)
    {
        System.out.println(msg);
        System.out.println(" (run with -h flag for details)");
        System.exit(1);
    }
    /**
     * Command-line entry point for the packager script.
     * <p>
     * Parses the command line, builds a PackageParameters object from the
     * flags, looks up the requesting EPerson, then dispatches to one of three
     * modes: replace (-r -f), submit/restore (default, or -r), or disseminate
     * (-d).  This method always terminates the JVM via System.exit() --
     * 0 on success, 1 on failure.
     *
     * @param argv the command line arguments given
     * @throws Exception if option parsing or context creation fails
     */
    public static void main(String[] argv) throws Exception
    {
        // Build the command-line option set
        Options options = new Options();
        options.addOption("p", "parent", true,
                "Handle(s) of parent Community or Collection into which to ingest object (repeatable)");
        options.addOption("e", "eperson", true,
                "email address of eperson doing importing");
        options
                .addOption(
                        "w",
                        "install",
                        false,
                        "disable workflow; install immediately without going through collection's workflow");
        options.addOption("r", "restore", false, "ingest in \"restore\" mode. Restores a missing object based on the contents in a package.");
        options.addOption("k", "keep-existing", false, "if an object is found to already exist during a restore (-r), then keep the existing object and continue processing. Can only be used with '-r'. This avoids object-exists errors which are thrown by -r by default.");
        options.addOption("f", "force-replace", false, "if an object is found to already exist during a restore (-r), then remove it and replace it with the contents of the package. Can only be used with '-r'. This REPLACES the object(s) in the repository with the contents from the package(s).");
        options.addOption("t", "type", true, "package type or MIMEtype");
        options
                .addOption("o", "option", true,
                        "Packager option to pass to plugin, \"name=value\" (repeatable)");
        options.addOption("d", "disseminate", false,
                "Disseminate package (output); default is to submit.");
        options.addOption("s", "submit", false,
                "Submission package (Input); this is the default. ");
        options.addOption("i", "identifier", true, "Handle of object to disseminate.");
        options.addOption("a", "all", false, "also recursively ingest/disseminate any child packages, e.g. all Items within a Collection (not all packagers may support this option!)");
        options.addOption("h", "help", false, "help (you may also specify '-h -t [type]' for additional help with a specific type of packager)");
        options.addOption("u", "no-user-interaction", false, "Skips over all user interaction (i.e. [y/n] question prompts) within this script. This flag can be used if you want to save (pipe) a report of all changes to a file, and therefore need to bypass all user interaction.");
        CommandLineParser parser = new PosixParser();
        CommandLine line = parser.parse(options, argv);

        String sourceFile = null;
        String eperson = null;
        String[] parents = null;
        String identifier = null;

        PackageParameters pkgParams = new PackageParameters();

        //initialize a new packager -- we'll add all our current params as settings
        Packager myPackager = new Packager();

        // Help mode: print general usage, plus plugin-specific help when -t given
        if (line.hasOption('h'))
        {
            HelpFormatter myhelp = new HelpFormatter();
            myhelp.printHelp("Packager [options] package-file|-\n",
                    options);
            //If user specified a type, also print out the SIP and DIP options
            // that are specific to that type of packager
            if (line.hasOption('t'))
            {
                System.out.println("\n--------------------------------------------------------------");
                System.out.println("Additional options for the " + line.getOptionValue('t') + " packager:");
                System.out.println("--------------------------------------------------------------");
                System.out.println("(These options may be specified using --option as described above)");
                PackageIngester sip = (PackageIngester) PluginManager
                        .getNamedPlugin(PackageIngester.class, line.getOptionValue('t'));
                if (sip != null)
                {
                    System.out.println("\n\n" + line.getOptionValue('t') + " Submission (SIP) plugin options:\n");
                    System.out.println(sip.getParameterHelp());
                }
                else
                {
                    System.out.println("\nNo valid Submission plugin found for " + line.getOptionValue('t') + " type.");
                }
                PackageDisseminator dip = (PackageDisseminator) PluginManager
                        .getNamedPlugin(PackageDisseminator.class, line.getOptionValue('t'));
                if (dip != null)
                {
                    System.out.println("\n\n" + line.getOptionValue('t') + " Dissemination (DIP) plugin options:\n");
                    System.out.println(dip.getParameterHelp());
                }
                else
                {
                    System.out.println("\nNo valid Dissemination plugin found for " + line.getOptionValue('t') + " type.");
                }
            }
            else //otherwise, display list of valid packager types
            {
                System.out.println("\nAvailable Submission Package (SIP) types:");
                String pn[] = PluginManager
                        .getAllPluginNames(PackageIngester.class);
                for (int i = 0; i < pn.length; ++i)
                {
                    System.out.println(" " + pn[i]);
                }
                System.out
                        .println("\nAvailable Dissemination Package (DIP) types:");
                pn = PluginManager.getAllPluginNames(PackageDisseminator.class);
                for (int i = 0; i < pn.length; ++i)
                {
                    System.out.println(" " + pn[i]);
                }
            }
            System.exit(0);
        }

        //look for flag to disable all user interaction
        if(line.hasOption('u'))
        {
            myPackager.userInteractionEnabled = false;
        }
        if (line.hasOption('w'))
        {
            pkgParams.setWorkflowEnabled(false);
        }
        if (line.hasOption('r'))
        {
            pkgParams.setRestoreModeEnabled(true);
        }
        //keep-existing is only valid in restoreMode (-r) -- otherwise ignore -k option.
        if (line.hasOption('k') && pkgParams.restoreModeEnabled())
        {
            pkgParams.setKeepExistingModeEnabled(true);
        }
        //force-replace is only valid in restoreMode (-r) -- otherwise ignore -f option.
        if (line.hasOption('f') && pkgParams.restoreModeEnabled())
        {
            pkgParams.setReplaceModeEnabled(true);
        }
        if (line.hasOption('e'))
        {
            eperson = line.getOptionValue('e');
        }
        if (line.hasOption('p'))
        {
            parents = line.getOptionValues('p');
        }
        if (line.hasOption('t'))
        {
            myPackager.packageType = line.getOptionValue('t');
        }
        if (line.hasOption('i'))
        {
            identifier = line.getOptionValue('i');
        }
        if (line.hasOption('a'))
        {
            //enable 'recursiveMode' param to packager implementations, in case it helps with packaging or ingestion process
            pkgParams.setRecursiveModeEnabled(true);
        }
        // the single positional argument is the package file ("-" = stdin/stdout)
        String files[] = line.getArgs();
        if (files.length > 0)
        {
            sourceFile = files[0];
        }
        if (line.hasOption('d'))
        {
            myPackager.submit = false;
        }
        // parse repeatable -o "name=value" pairs into package parameters
        if (line.hasOption('o'))
        {
            String popt[] = line.getOptionValues('o');
            for (int i = 0; i < popt.length; ++i)
            {
                String pair[] = popt[i].split("\\=", 2);
                if (pair.length == 2)
                {
                    pkgParams.addProperty(pair[0].trim(), pair[1].trim());
                }
                else if (pair.length == 1)
                {
                    pkgParams.addProperty(pair[0].trim(), "");
                }
                else
                {
                    System.err
                            .println("Warning: Illegal package option format: \""
                                    + popt[i] + "\"");
                }
            }
        }

        // Sanity checks on arg list: required args
        // REQUIRED: sourceFile, ePerson (-e), packageType (-t)
        if (sourceFile == null || eperson == null || myPackager.packageType == null)
        {
            System.err.println("Error - missing a REQUIRED argument or option.\n");
            HelpFormatter myhelp = new HelpFormatter();
            myhelp.printHelp("PackageManager [options] package-file|-\n", options);
            // NOTE(review): exits with status 0 even though this is an error path -- confirm intended
            System.exit(0);
        }

        // find the EPerson, assign to context
        Context context = new Context();
        EPerson myEPerson = null;
        myEPerson = EPerson.findByEmail(context, eperson);
        if (myEPerson == null)
        {
            usageError("Error, eperson cannot be found: " + eperson);
        }
        context.setCurrentUser(myEPerson);

        //If we are in REPLACE mode
        if(pkgParams.replaceModeEnabled())
        {
            PackageIngester sip = (PackageIngester) PluginManager
                    .getNamedPlugin(PackageIngester.class, myPackager.packageType);
            if (sip == null)
            {
                usageError("Error, Unknown package type: " + myPackager.packageType);
            }
            DSpaceObject objToReplace = null;
            //if a specific identifier was specified, make sure it is valid
            if(identifier!=null && identifier.length()>0)
            {
                objToReplace = HandleManager.resolveToObject(context, identifier);
                if (objToReplace == null)
                {
                    throw new IllegalArgumentException("Bad identifier/handle -- "
                            + "Cannot resolve handle \"" + identifier + "\"");
                }
            }
            // Confirm the destructive operation with the user (unless -u given)
            String choiceString = null;
            if(myPackager.userInteractionEnabled)
            {
                BufferedReader input = new BufferedReader(new InputStreamReader(System.in));
                System.out.println("\n\nWARNING -- You are running the packager in REPLACE mode.");
                System.out.println("\nREPLACE mode may be potentially dangerous as it will automatically remove and replace contents within DSpace.");
                System.out.println("We highly recommend backing up all your DSpace contents (files & database) before continuing.");
                System.out.print("\nWould you like to continue? [y/n]: ");
                choiceString = input.readLine();
            }
            else
            {
                //user interaction disabled -- default answer to 'yes', otherwise script won't continue
                choiceString = "y";
            }
            if (choiceString.equalsIgnoreCase("y"))
            {
                System.out.println("Beginning replacement process...");
                try
                {
                    //replace the object from the source file
                    myPackager.replace(context, sip, pkgParams, sourceFile, objToReplace);
                    //commit all changes & exit successfully
                    context.complete();
                    System.exit(0);
                }
                catch (Exception e)
                {
                    // abort all operations
                    e.printStackTrace();
                    context.abort();
                    System.out.println(e);
                    System.exit(1);
                }
            }
        }
        //else if normal SUBMIT mode (or basic RESTORE mode -- which is a special type of submission)
        else if (myPackager.submit || pkgParams.restoreModeEnabled())
        {
            PackageIngester sip = (PackageIngester) PluginManager
                    .getNamedPlugin(PackageIngester.class, myPackager.packageType);
            if (sip == null)
            {
                usageError("Error, Unknown package type: " + myPackager.packageType);
            }
            // validate each parent arg (if any)
            DSpaceObject parentObjs[] = null;
            if(parents!=null)
            {
                System.out.println("Destination parents:");
                parentObjs = new DSpaceObject[parents.length];
                for (int i = 0; i < parents.length; i++)
                {
                    // sanity check: did handle resolve?
                    parentObjs[i] = HandleManager.resolveToObject(context,
                            parents[i]);
                    if (parentObjs[i] == null)
                    {
                        throw new IllegalArgumentException(
                                "Bad parent list -- "
                                        + "Cannot resolve parent handle \""
                                        + parents[i] + "\"");
                    }
                    System.out.println((i == 0 ? "Owner: " : "Parent: ")
                            + parentObjs[i].getHandle());
                }
            }
            try
            {
                //ingest the object from the source file
                myPackager.ingest(context, sip, pkgParams, sourceFile, parentObjs);
                //commit all changes & exit successfully
                context.complete();
                System.exit(0);
            }
            catch (Exception e)
            {
                // abort all operations
                e.printStackTrace();
                context.abort();
                System.out.println(e);
                System.exit(1);
            }
        }// else, if DISSEMINATE mode
        else
        {
            //retrieve specified package disseminator
            PackageDisseminator dip = (PackageDisseminator) PluginManager
                    .getNamedPlugin(PackageDisseminator.class, myPackager.packageType);
            if (dip == null)
            {
                usageError("Error, Unknown package type: " + myPackager.packageType);
            }
            DSpaceObject dso = HandleManager.resolveToObject(context, identifier);
            if (dso == null)
            {
                throw new IllegalArgumentException("Bad identifier/handle -- "
                        + "Cannot resolve handle \"" + identifier + "\"");
            }
            //disseminate the requested object
            myPackager.disseminate(context, dip, dso, pkgParams, sourceFile);
        }
        System.exit(0);
    }
    /**
     * Ingest one or more DSpace objects from package(s) based on the
     * options passed to the 'packager' script.  This method is called
     * for both 'submit' (-s) and 'restore' (-r) modes.
     * <p>
     * Please note that replace (-r -f) mode calls the replace() method instead.
     * <p>
     * Note: if the package file does not exist, this method terminates the
     * JVM with exit status 1 rather than throwing.
     *
     * @param context DSpace Context
     * @param sip PackageIngester which will actually ingest the package
     * @param pkgParams Parameters to pass to individual packager instances
     * @param sourceFile location of the source package to ingest
     * @param parentObjs Parent DSpace object(s) to attach new object to
     *                   (may be null; the first entry, if any, becomes the owner)
     * @throws IOException
     * @throws SQLException
     * @throws FileNotFoundException
     * @throws AuthorizeException
     * @throws CrosswalkException
     * @throws PackageException
     */
    protected void ingest(Context context, PackageIngester sip, PackageParameters pkgParams, String sourceFile, DSpaceObject parentObjs[])
            throws IOException, SQLException, FileNotFoundException, AuthorizeException, CrosswalkException, PackageException
    {
        // make sure we have an input file
        File pkgFile = new File(sourceFile);
        if(!pkgFile.exists())
        {
            System.out.println("\nERROR: Package located at " + sourceFile + " does not exist!");
            System.exit(1);
        }

        System.out.println("\nIngesting package located at " + sourceFile);

        //find first parent (if specified) -- this will be the "owner" of the object
        DSpaceObject parent = null;
        if(parentObjs!=null && parentObjs.length>0)
        {
            parent = parentObjs[0];
        }
        //NOTE: at this point, Parent may be null -- in which case it is up to the PackageIngester
        // to either determine the Parent (from package contents) or throw an error.

        //If we are doing a recursive ingest, call ingestAll()
        if(pkgParams.recursiveModeEnabled())
        {
            System.out.println("\nAlso ingesting all referenced packages (recursive mode)..");
            System.out.println("This may take a while, please check your logs for ongoing status while we process each package.");

            //ingest first package & recursively ingest anything else that package references (child packages, etc)
            List<DSpaceObject> dsoResults = sip.ingestAll(context, parent, pkgFile, pkgParams, null);

            if(dsoResults!=null)
            {
                //Report total objects created
                System.out.println("\nCREATED a total of " + dsoResults.size() + " DSpace Objects.");

                String choiceString = null;

                //Ask if user wants full list printed to command line, as this may be rather long.
                if(this.userInteractionEnabled)
                {
                    BufferedReader input = new BufferedReader(new InputStreamReader(System.in));
                    System.out.print("\nWould you like to view a list of all objects that were created? [y/n]: ");
                    choiceString = input.readLine();
                }
                else
                {
                    // user interaction disabled -- default answer to 'yes', as
                    // we want to provide user with as detailed a report as possible.
                    choiceString = "y";
                }

                // Provide detailed report if user answered 'yes'
                if (choiceString.equalsIgnoreCase("y"))
                {
                    System.out.println("\n\n");
                    for(DSpaceObject result : dsoResults)
                    {
                        if(pkgParams.restoreModeEnabled())
                        {
                            System.out.println("RESTORED DSpace " + Constants.typeText[result.getType()] +
                                    " [ hdl=" + result.getHandle() + ", dbID=" + result.getID() + " ] ");
                        }
                        else
                        {
                            System.out.println("CREATED new DSpace " + Constants.typeText[result.getType()] +
                                    " [ hdl=" + result.getHandle() + ", dbID=" + result.getID() + " ] ");
                        }
                    }
                }
            }
        }
        else
        {
            //otherwise, just one package to ingest
            try
            {
                DSpaceObject dso = sip.ingest(context, parent, pkgFile, pkgParams, null);

                if(dso!=null)
                {
                    if(pkgParams.restoreModeEnabled())
                    {
                        System.out.println("RESTORED DSpace " + Constants.typeText[dso.getType()] +
                                " [ hdl=" + dso.getHandle() + ", dbID=" + dso.getID() + " ] ");
                    }
                    else
                    {
                        System.out.println("CREATED new DSpace " + Constants.typeText[dso.getType()] +
                                " [ hdl=" + dso.getHandle() + ", dbID=" + dso.getID() + " ] ");
                    }
                }
            }
            catch(IllegalStateException ie)
            {
                // NOTE: if we encounter an IllegalStateException, this means the
                // handle is already in use and this object already exists.

                //if we are skipping over (i.e. keeping) existing objects
                if(pkgParams.keepExistingModeEnabled())
                {
                    System.out.println("\nSKIPPED processing package '" + pkgFile + "', as an Object already exists with this handle.");
                }
                else // Pass this exception on -- which essentially causes a full rollback of all changes (this is the default)
                {
                    throw ie;
                }
            }
        }
    }
    /**
     * Disseminate one or more DSpace objects into package(s) based on the
     * options passed to the 'packager' script.
     *
     * @param context DSpace context
     * @param dip PackageDisseminator which will actually create the package
     * @param dso DSpace Object to disseminate as a package
     * @param pkgParams Parameters to pass to individual packager instances
     * @param outputFile File where final package should be saved
     * @throws IOException
     * @throws SQLException
     * @throws FileNotFoundException
     * @throws AuthorizeException
     * @throws CrosswalkException
     * @throws PackageException
     */
    protected void disseminate(Context context, PackageDisseminator dip, DSpaceObject dso, PackageParameters pkgParams, String outputFile)
            throws IOException, SQLException, FileNotFoundException, AuthorizeException, CrosswalkException, PackageException
    {
        // initialize output file
        File pkgFile = new File(outputFile);

        System.out.println("\nDisseminating DSpace " + Constants.typeText[dso.getType()] +
                " [ hdl=" + dso.getHandle() + " ] to " + outputFile);

        //If we are doing a recursive dissemination of this object & all its child objects, call disseminateAll()
        if(pkgParams.recursiveModeEnabled())
        {
            System.out.println("\nAlso disseminating all child objects (recursive mode)..");
            System.out.println("This may take a while, please check your logs for ongoing status while we process each package.");

            //disseminate initial object & recursively disseminate all child objects as well
            List<File> fileResults = dip.disseminateAll(context, dso, pkgParams, pkgFile);

            if(fileResults!=null)
            {
                //Report total files created
                System.out.println("\nCREATED a total of " + fileResults.size() + " dissemination package files.");

                String choiceString = null;

                //Ask if user wants full list printed to command line, as this may be rather long.
                if(this.userInteractionEnabled)
                {
                    BufferedReader input = new BufferedReader(new InputStreamReader(System.in));
                    System.out.print("\nWould you like to view a list of all files that were created? [y/n]: ");
                    choiceString = input.readLine();
                }
                else
                {
                    // user interaction disabled -- default answer to 'yes', as
                    // we want to provide user with as detailed a report as possible.
                    choiceString = "y";
                }

                // Provide detailed report if user answered 'yes'
                if (choiceString.equalsIgnoreCase("y"))
                {
                    System.out.println("\n\n");
                    for(File result : fileResults)
                    {
                        System.out.println("CREATED package file: " + result.getCanonicalPath());
                    }
                }
            }
        }
        else
        {
            //otherwise, just disseminate a single object to a single package file
            dip.disseminate(context, dso, pkgParams, pkgFile);

            if(pkgFile!=null && pkgFile.exists())
            {
                System.out.println("\nCREATED package file: " + pkgFile.getCanonicalPath());
            }
        }
    }
    /**
     * Replace one or more existing DSpace objects with the contents of
     * specified package(s) based on the options passed to the 'packager' script.
     * This method is only called for full replaces ('-r -f' options specified).
     * <p>
     * Note: if the package file does not exist, this method terminates the
     * JVM with exit status 1 rather than throwing.
     *
     * @param context DSpace Context
     * @param sip PackageIngester which will actually replace the object with the package
     * @param pkgParams Parameters to pass to individual packager instances
     * @param sourceFile location of the source package to ingest as the replacement
     * @param objToReplace DSpace object to replace (may be null if it will be specified in the package itself)
     * @throws IOException
     * @throws SQLException
     * @throws FileNotFoundException
     * @throws AuthorizeException
     * @throws CrosswalkException
     * @throws PackageException
     */
    protected void replace(Context context, PackageIngester sip, PackageParameters pkgParams, String sourceFile, DSpaceObject objToReplace)
            throws IOException, SQLException, FileNotFoundException, AuthorizeException, CrosswalkException, PackageException
    {
        // make sure we have an input file
        File pkgFile = new File(sourceFile);

        if(!pkgFile.exists())
        {
            System.out.println("\nPackage located at " + sourceFile + " does not exist!");
            System.exit(1);
        }

        System.out.println("\nReplacing DSpace object(s) with package located at " + sourceFile);
        if(objToReplace!=null)
        {
            System.out.println("Will replace existing DSpace " + Constants.typeText[objToReplace.getType()] +
                    " [ hdl=" + objToReplace.getHandle() + " ]");
        }
        // NOTE: At this point, objToReplace may be null. If it is null, it is up to the PackageIngester
        // to determine which Object needs to be replaced (based on the handle specified in the pkg, etc.)

        //If we are doing a recursive replace, call replaceAll()
        if(pkgParams.recursiveModeEnabled())
        {
            //ingest first object using package & recursively replace anything else that package references (child objects, etc)
            List<DSpaceObject> dsoResults = sip.replaceAll(context, objToReplace, pkgFile, pkgParams);

            if(dsoResults!=null)
            {
                //Report total objects replaced
                System.out.println("\nREPLACED a total of " + dsoResults.size() + " DSpace Objects.");

                String choiceString = null;

                //Ask if user wants full list printed to command line, as this may be rather long.
                if(this.userInteractionEnabled)
                {
                    BufferedReader input = new BufferedReader(new InputStreamReader(System.in));
                    System.out.print("\nWould you like to view a list of all objects that were replaced? [y/n]: ");
                    choiceString = input.readLine();
                }
                else
                {
                    // user interaction disabled -- default answer to 'yes', as
                    // we want to provide user with as detailed a report as possible.
                    choiceString = "y";
                }

                // Provide detailed report if user answered 'yes'
                if (choiceString.equalsIgnoreCase("y"))
                {
                    System.out.println("\n\n");
                    for(DSpaceObject result : dsoResults)
                    {
                        System.out.println("REPLACED DSpace " + Constants.typeText[result.getType()] +
                                " [ hdl=" + result.getHandle() + " ] ");
                    }
                }
            }
        }
        else
        {
            //otherwise, just one object to replace
            DSpaceObject dso = sip.replace(context, objToReplace, pkgFile, pkgParams);

            if(dso!=null)
            {
                System.out.println("REPLACED DSpace " + Constants.typeText[dso.getType()] +
                        " [ hdl=" + dso.getHandle() + " ] ");
            }
        }
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.bulkedit;
import org.apache.commons.cli.*;
import org.dspace.content.*;
import org.dspace.core.Constants;
import org.dspace.core.Context;
import org.dspace.handle.HandleManager;
import java.util.ArrayList;
import java.sql.SQLException;
import java.util.List;
/**
* Metadata exporter to allow the batch export of metadata into a file
*
* @author Stuart Lewis
*/
public class MetadataExport
{
/** The items to export */
private ItemIterator toExport;
/** Whether to export all metadata, or just normally edited metadata */
private boolean exportAll;
/**
* Set up a new metadata export
*
* @param c The Context
* @param toExport The ItemIterator of items to export
* @param exportAll whether to export all metadata or not (include handle, provenance etc)
*/
public MetadataExport(Context c, ItemIterator toExport, boolean exportAll)
{
// Store the export settings
this.toExport = toExport;
this.exportAll = exportAll;
}
/**
* Method to export a community (and sub-communities and collections)
*
* @param c The Context
* @param toExport The Community to export
* @param exportAll whether to export all metadata or not (include handle, provenance etc)
*/
public MetadataExport(Context c, Community toExport, boolean exportAll)
{
try
{
// Try to export the community
this.toExport = new ItemIterator(c, buildFromCommunity(toExport, new ArrayList<Integer>(), 0));
this.exportAll = exportAll;
}
catch (SQLException sqle)
{
// Something went wrong...
System.err.println("Error running exporter:");
sqle.printStackTrace(System.err);
System.exit(1);
}
}
/**
* Build an array list of item ids that are in a community (include sub-communities and collections)
*
* @param community The community to build from
* @param itemIDs The itemID (used for recursion - use an empty ArrayList)
* @param indent How many spaces to use when writing out the names of items added
* @return The list of item ids
* @throws SQLException
*/
private List<Integer> buildFromCommunity(Community community, List<Integer> itemIDs, int indent)
throws SQLException
{
// Add all the collections
Collection[] collections = community.getCollections();
for (Collection collection : collections)
{
for (int i = 0; i < indent; i++)
{
System.out.print(" ");
}
ItemIterator items = collection.getAllItems();
while (items.hasNext())
{
int id = items.next().getID();
// Only add if not already included (so mapped items only appear once)
if (!itemIDs.contains(id))
{
itemIDs.add(id);
}
}
}
// Add all the sub-communities
Community[] communities = community.getSubcommunities();
for (Community subCommunity : communities)
{
for (int i = 0; i < indent; i++)
{
System.out.print(" ");
}
buildFromCommunity(subCommunity, itemIDs, indent + 1);
}
return itemIDs;
}
/**
* Run the export
*
* @return the exported CSV lines
*/
public DSpaceCSV export()
{
try
{
// Process each item
DSpaceCSV csv = new DSpaceCSV(exportAll);
while (toExport.hasNext())
{
csv.addItem(toExport.next());
}
// Return the results
return csv;
}
catch (Exception e)
{
return null;
}
}
/**
* Print the help message
*
* @param options The command line options the user gave
* @param exitCode the system exit code to use
*/
private static void printHelp(Options options, int exitCode)
{
// print the help message
HelpFormatter myhelp = new HelpFormatter();
myhelp.printHelp("MetadataExport\n", options);
System.out.println("\nfull export: metadataexport -f filename");
System.out.println("partial export: metadataexport -i handle -f filename");
System.exit(exitCode);
}
/**
* main method to run the metadata exporter
*
* @param argv the command line arguments given
*/
public static void main(String[] argv) throws Exception
{
// Create an options object and populate it
CommandLineParser parser = new PosixParser();
Options options = new Options();
options.addOption("i", "id", true, "ID or handle of thing to export (item, collection, or community)");
options.addOption("f", "file", true, "destination where you want file written");
options.addOption("a", "all", false, "include all metadata fields that are not normally changed (e.g. provenance)");
options.addOption("h", "help", false, "help");
CommandLine line = null;
try
{
line = parser.parse(options, argv);
}
catch (ParseException pe)
{
System.err.println("Error with commands.");
printHelp(options, 1);
System.exit(0);
}
if (line.hasOption('h'))
{
printHelp(options, 0);
}
// Check a filename is given
if (!line.hasOption('f'))
{
System.err.println("Required parameter -f missing!");
printHelp(options, 1);
}
String filename = line.getOptionValue('f');
// Create a context
Context c = new Context();
c.turnOffAuthorisationSystem();
// The things we'll export
ItemIterator toExport = null;
MetadataExport exporter = null;
// Export everything?
boolean exportAll = line.hasOption('a');
// Check we have an item OK
if (!line.hasOption('i'))
{
System.out.println("Exporting whole repository WARNING: May take some time!");
exporter = new MetadataExport(c, Item.findAll(c), exportAll);
}
else
{
String handle = line.getOptionValue('i');
DSpaceObject dso = HandleManager.resolveToObject(c, handle);
if (dso == null)
{
System.err.println("Item '" + handle + "' does not resolve to an item in your repository!");
printHelp(options, 1);
}
if (dso.getType() == Constants.ITEM)
{
System.out.println("Exporting item '" + dso.getName() + "' (" + handle + ")");
List<Integer> item = new ArrayList<Integer>();
item.add(dso.getID());
exporter = new MetadataExport(c, new ItemIterator(c, item), exportAll);
}
else if (dso.getType() == Constants.COLLECTION)
{
System.out.println("Exporting collection '" + dso.getName() + "' (" + handle + ")");
Collection collection = (Collection)dso;
toExport = collection.getAllItems();
exporter = new MetadataExport(c, toExport, exportAll);
}
else if (dso.getType() == Constants.COMMUNITY)
{
System.out.println("Exporting community '" + dso.getName() + "' (" + handle + ")");
exporter = new MetadataExport(c, (Community)dso, exportAll);
}
else
{
System.err.println("Error identifying '" + handle + "'");
System.exit(1);
}
}
// Perform the export
DSpaceCSV csv = exporter.export();
// Save the files to the file
csv.save(filename);
// Finish off and tidy up
c.restoreAuthSystemState();
c.complete();
}
} | Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.bulkedit;
import org.dspace.content.*;
import org.dspace.content.Collection;
import org.dspace.core.ConfigurationManager;
import org.dspace.core.Context;
import java.util.*;
import java.util.regex.Pattern;
import java.util.regex.Matcher;
import java.io.*;
/**
* Utility class to read and write CSV files
*
* **************
* Important Note
* **************
*
* This class has been made serializable, as it is stored in a Session.
* Is it wise to:
* a) be putting this into a user's session?
* b) holding an entire CSV upload in memory?
*
* @author Stuart Lewis
*/
public class DSpaceCSV implements Serializable
{
    /** The headings of the CSV file (e.g. "collection", "dc.title[en]") */
    private List<String> headings;

    /** An array list of CSV lines */
    private List<DSpaceCSVLine> lines;

    /** A counter of how many CSV lines this object holds */
    private int counter;

    // NOTE: the four separator fields below are static, so they are shared by
    // every DSpaceCSV instance in the JVM and re-set whenever init() runs.

    /** The value separator (defaults to double pipe '||') */
    protected static String valueSeparator;

    /** The value separator in an escaped form for using in regexs */
    protected static String escapedValueSeparator;

    /** The field separator (defaults to comma) */
    protected static String fieldSeparator;

    /** The field separator in an escaped form for using in regexs */
    protected static String escapedFieldSeparator;

    /** Whether to export all metadata such as handles and provenance information */
    private boolean exportAll;

    /** Metadata fields to ignore on export (map keyed and valued by the field name) */
    private Map<String, String> ignore;
/**
* Create a new instance of a CSV line holder
*
* @param exportAll Whether to export all metadata such as handles and provenance information
*/
public DSpaceCSV(boolean exportAll)
{
// Initialise the class
init();
// Store the exportAll setting
this.exportAll = exportAll;
}
    /**
     * Create a new instance, reading the lines in from file
     *
     * @param f The file to read from
     * @param c The DSpace Context
     *
     * @throws Exception thrown if there is an error reading or processing the file
     */
    public DSpaceCSV(File f, Context c) throws Exception
    {
        // Initialise the class
        init();

        // Open the CSV file
        BufferedReader input = null;
        try
        {
            input = new BufferedReader(new InputStreamReader(new FileInputStream(f),"UTF-8"));

            // Read the heading line
            // NOTE(review): readLine() returns null for an empty file, which would
            // NPE on the split() below -- confirm empty input is impossible here
            String head = input.readLine();
            String[] headingElements = head.split(escapedFieldSeparator);
            for (String element : headingElements)
            {
                // Remove surrounding quotes if there are any
                if ((element.startsWith("\"")) && (element.endsWith("\"")))
                {
                    element = element.substring(1, element.length() - 1);
                }

                // Store the heading
                if ("collection".equals(element))
                {
                    // Store the heading
                    headings.add(element);
                }
                else if (!"id".equals(element))
                {
                    // Verify that the heading is valid in the metadata registry
                    // Strip any trailing language qualifier, e.g. "dc.title[en]" -> "dc.title"
                    String[] clean = element.split("\\[");
                    // NOTE(review): a heading without a '.' gives parts.length == 1
                    // and the parts[1] access below would throw -- confirm headings
                    // are always schema.element[.qualifier]
                    String[] parts = clean[0].split("\\.");

                    String metadataSchema = parts[0];
                    String metadataElement = parts[1];
                    String metadataQualifier = null;
                    if (parts.length > 2) {
                        metadataQualifier = parts[2];
                    }

                    // Check that the scheme exists
                    MetadataSchema foundSchema = MetadataSchema.find(c, metadataSchema);
                    if (foundSchema == null) {
                        throw new MetadataImportInvalidHeadingException(clean[0],
                                MetadataImportInvalidHeadingException.SCHEMA);
                    }

                    // Check that the metadata element exists in the schema
                    int schemaID = foundSchema.getSchemaID();
                    MetadataField foundField = MetadataField.findByElement(c, schemaID, metadataElement, metadataQualifier);
                    if (foundField == null) {
                        throw new MetadataImportInvalidHeadingException(clean[0],
                                MetadataImportInvalidHeadingException.ELEMENT);
                    }

                    // Store the heading
                    headings.add(element);
                }
            }

            // Read each subsequent line.  Because quoted cell values may contain
            // embedded newlines, lines are buffered until the total number of
            // quote characters is even (i.e. all quotes are balanced).
            StringBuilder lineBuilder = new StringBuilder();
            String lineRead;

            while ((lineRead = input.readLine()) != null)
            {
                if (lineBuilder.length() > 0) {
                    // Already have a previously read value - add this line
                    lineBuilder.append("\n").append(lineRead);

                    // Count the number of quotes in the buffer
                    int quoteCount = 0;
                    for (int pos = 0; pos < lineBuilder.length(); pos++) {
                        if (lineBuilder.charAt(pos) == '"') {
                            quoteCount++;
                        }
                    }

                    if (quoteCount % 2 == 0) {
                        // Number of quotes is a multiple of 2, add the item
                        addItem(lineBuilder.toString());
                        lineBuilder = new StringBuilder();
                    }
                } else if (lineRead.indexOf('"') > -1) {
                    // Get the number of quotes in the line
                    int quoteCount = 0;
                    for (int pos = 0; pos < lineRead.length(); pos++) {
                        if (lineRead.charAt(pos) == '"') {
                            quoteCount++;
                        }
                    }

                    if (quoteCount % 2 == 0) {
                        // Number of quotes is a multiple of 2, add the item
                        addItem(lineRead);
                    } else {
                        // Uneven quotes - add to the buffer and leave for later
                        lineBuilder.append(lineRead);
                    }
                } else {
                    // No previously read line, and no quotes in the line - add item
                    addItem(lineRead);
                }
            }
        }
        finally
        {
            // Always release the file handle, even if parsing failed
            if (input != null)
            {
                input.close();
            }
        }
    }
/**
* Initialise this class with values from dspace.cfg
*/
private void init()
{
// Set the value separator
setValueSeparator();
// Set the field separator
setFieldSeparator();
// Create the headings
headings = new ArrayList<String>();
// Create the blank list of items
lines = new ArrayList<DSpaceCSVLine>();
// Initialise the counter
counter = 0;
// Set the metadata fields to ignore
ignore = new HashMap<String, String>();
String toIgnore = ConfigurationManager.getProperty("bulkedit.ignore-on-export");
if ((toIgnore == null) || ("".equals(toIgnore.trim())))
{
// Set a default value
toIgnore = "dc.date.accessioned, dc.date.available, " +
"dc.date.updated, dc.description.provenance";
}
String[] toIgnoreArray = toIgnore.split(",");
for (String toIgnoreString : toIgnoreArray)
{
if (!"".equals(toIgnoreString.trim()))
{
ignore.put(toIgnoreString.trim(), toIgnoreString.trim());
}
}
}
/**
* Set the value separator for multiple values stored in one csv value.
*
* Is set in dspace.cfg as bulkedit.valueseparator
*
* If not set, defaults to double pipe '||'
*/
private void setValueSeparator()
{
// Get the value separator
valueSeparator = ConfigurationManager.getProperty("bulkedit.valueseparator");
if ((valueSeparator != null) && (!"".equals(valueSeparator.trim())))
{
valueSeparator = valueSeparator.trim();
}
else
{
valueSeparator = "||";
}
// Now store the escaped version
Pattern spchars = Pattern.compile("([\\\\*+\\[\\](){}\\$.?\\^|])");
Matcher match = spchars.matcher(valueSeparator);
escapedValueSeparator = match.replaceAll("\\\\$1");
}
/**
* Set the field separator use to separate fields in the csv.
*
* Is set in dspace.cfg as bulkedit.fieldseparator
*
* If not set, defaults to comma ','.
*
* Special values are 'tab', 'hash' and 'semicolon' which will
* get substituted from the text to the value.
*/
private void setFieldSeparator()
{
// Get the value separator
fieldSeparator = ConfigurationManager.getProperty("bulkedit.fieldseparator");
if ((fieldSeparator != null) && (!"".equals(fieldSeparator.trim())))
{
fieldSeparator = fieldSeparator.trim();
if ("tab".equals(fieldSeparator))
{
fieldSeparator = "\t";
}
else if ("semicolon".equals(fieldSeparator))
{
fieldSeparator = ";";
}
else if ("hash".equals(fieldSeparator))
{
fieldSeparator = "#";
}
else
{
fieldSeparator = fieldSeparator.trim();
}
}
else
{
fieldSeparator = ",";
}
// Now store the escaped version
Pattern spchars = Pattern.compile("([\\\\*+\\[\\](){}\\$.?\\^|])");
Matcher match = spchars.matcher(fieldSeparator);
escapedFieldSeparator = match.replaceAll("\\\\$1");
}
/**
* Add a DSpace item to the CSV file
*
* @param i The DSpace item
*
* @throws Exception if something goes wrong with adding the Item
*/
public final void addItem(Item i) throws Exception
{
// Create the CSV line
DSpaceCSVLine line = new DSpaceCSVLine(i.getID());
// Add in owning collection
String owningCollectionHandle = i.getOwningCollection().getHandle();
line.add("collection", owningCollectionHandle);
// Add in any mapped collections
Collection[] collections = i.getCollections();
for (Collection c : collections)
{
// Only add if it is not the owning collection
if (!c.getHandle().equals(owningCollectionHandle))
{
line.add("collection", c.getHandle());
}
}
// Populate it
DCValue md[] = i.getMetadata(Item.ANY, Item.ANY, Item.ANY, Item.ANY);
for (DCValue value : md)
{
// Get the key (schema.element)
String key = value.schema + "." + value.element;
// Add the qualifier if there is one (schema.element.qualifier)
if (value.qualifier != null)
{
key = key + "." + value.qualifier;
}
// Add the language if there is one (schema.element.qualifier[langauge])
//if ((value.language != null) && (!"".equals(value.language)))
if (value.language != null)
{
key = key + "[" + value.language + "]";
}
// Store the item
if (exportAll || okToExport(value))
{
line.add(key, value.value);
if (!headings.contains(key))
{
headings.add(key);
}
}
}
lines.add(line);
counter++;
}
    /**
     * Add an item to the CSV file, from a CSV line of elements.
     *
     * Handles quoted fields: a part that opens with a quote but has an
     * unbalanced number of quotes is re-joined with the following part
     * (restoring the field separator) until the quotes balance, then
     * surrounding quotes and escaped ("") quotes are stripped.
     *
     * @param line The line of elements
     * @throws Exception Thrown if an error occurs when adding the item
     */
    public final void addItem(String line) throws Exception
    {
        // Check to see if the last character is a field separator, which hides the last empty column
        boolean last = false;
        if (line.endsWith(fieldSeparator))
        {
            // Add a space to the end, then remove it later
            last = true;
            line += " ";
        }

        // Split up on field separator
        String[] parts = line.split(escapedFieldSeparator);
        ArrayList<String> bits = new ArrayList<String>();
        bits.addAll(Arrays.asList(parts));

        // Merge parts with embedded separators: repeat until no part starts
        // with a quote while having an unbalanced quote count
        boolean alldone = false;
        while (!alldone)
        {
            boolean found = false;
            int i = 0;
            for (String part : bits)
            {
                // Number of quotes in this part (odd means unbalanced)
                int bitcounter = part.length() - part.replaceAll("\"", "").length();
                if ((part.startsWith("\"")) && ((!part.endsWith("\"")) || ((bitcounter & 1) == 1)))
                {
                    // Re-join this part with the next, restoring the separator
                    found = true;
                    String add = bits.get(i) + fieldSeparator + bits.get(i + 1);
                    bits.remove(i);
                    bits.add(i, add);
                    bits.remove(i + 1);
                    break;
                }
                i++;
            }
            alldone = !found;
        }

        // Deal with quotes around the elements
        int i = 0;
        for (String part : bits)
        {
            if ((part.startsWith("\"")) && (part.endsWith("\"")))
            {
                part = part.substring(1, part.length() - 1);
                bits.set(i, part);
            }
            i++;
        }

        // Remove embedded quotes ("" is the CSV escape for a literal ")
        i = 0;
        for (String part : bits)
        {
            if (part.contains("\"\""))
            {
                part = part.replaceAll("\"\"", "\"");
                bits.set(i, part);
            }
            i++;
        }

        // Add elements to a DSpaceCSVLine; the first column is the item id
        String id = parts[0].replaceAll("\"", "");
        DSpaceCSVLine csvLine;

        // Is this an existing item, or a new item (where id = '+')
        if ("+".equals(id))
        {
            csvLine = new DSpaceCSVLine();
        }
        else
        {
            try
            {
                csvLine = new DSpaceCSVLine(Integer.parseInt(id));
            }
            catch (NumberFormatException nfe)
            {
                System.err.println("Invalid item identifier: " + id);
                System.err.println("Please check your CSV file for information. " +
                                   "Item id must be numeric, or a '+' to add a new item");
                throw(nfe);
            }
        }

        // Add the rest of the parts, keyed by their matching heading
        i = 0;
        for (String part : bits)
        {
            if (i > 0)
            {
                // Is this the deliberately-empty last column?
                if ((last) && (i == headings.size()))
                {
                    part = "";
                }

                // Make sure we register that this column was there, even if empty
                csvLine.add(headings.get(i - 1), null);

                // Split the cell on the value separator and store each value
                String[] elements = part.split(escapedValueSeparator);
                for (String element : elements)
                {
                    if ((element != null) && (!"".equals(element)))
                    {
                        csvLine.add(headings.get(i - 1), element);
                    }
                }
            }
            i++;
        }
        lines.add(csvLine);
        counter++;
    }
/**
* Get the lines in CSV holders
*
* @return The lines
*/
public final List<DSpaceCSVLine> getCSVLines()
{
// Return the lines
return lines;
}
/**
* Get the CSV lines as an array of CSV formatted strings
*
* @return the array of CSV formatted Strings
*/
public final String[] getCSVLinesAsStringArray()
{
// Create the headings line
String[] csvLines = new String[counter + 1];
csvLines[0] = "id" + fieldSeparator + "collection";
Collections.sort(headings);
for (String value : headings)
{
csvLines[0] = csvLines[0] + fieldSeparator + value;
}
Iterator<DSpaceCSVLine> i = lines.iterator();
int c = 1;
while (i.hasNext())
{
csvLines[c++] = i.next().toCSV(headings);
}
return csvLines;
}
/**
* Save the CSV file to the given filename
*
* @param filename The filename to save the CSV file to
*
* @throws IOException Thrown if an error occurs when writing the file
*/
public final void save(String filename) throws IOException
{
// Save the file
BufferedWriter out = new BufferedWriter(
new OutputStreamWriter(
new FileOutputStream(filename), "UTF-8"));
for (String csvLine : getCSVLinesAsStringArray()) {
out.write(csvLine + "\n");
}
out.flush();
out.close();
}
/**
* Is it Ok to export this value? When exportAll is set to false, we don't export
* some of the metadata elements.
*
* The list can be configured via the key bulkedit.ignore-on-export in dspace.cfg
*
* @param md The DCValue to examine
* @return Whether or not it is OK to export this element
*/
private final boolean okToExport(DCValue md)
{
// First check the metadata format, and K all non DC elements
if (!"dc".equals(md.schema))
{
return true;
}
// Now compare with the list to ignore
String key = md.schema + "." + md.element;
if (md.qualifier != null)
{
key += "." + md.qualifier;
}
if (ignore.get(key) != null) {
return false;
}
// Must be OK, so don't ignore
return true;
}
/**
* Get the headings used in this CSV file
*
* @return The headings
*/
public List<String> getHeadings()
{
return headings;
}
/**
* Return the csv file as one long formatted string
*
* @return The formatted String as a csv
*/
public final String toString()
{
// Return the csv as one long string
StringBuffer csvLines = new StringBuffer();
String[] lines = this.getCSVLinesAsStringArray();
for (String line : lines)
{
csvLines.append(line).append("\n");
}
return csvLines.toString();
}
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.bulkedit;
/**
 * Exception thrown by the metadata importer when a CSV heading does not
 * correspond to a known metadata schema or element.
 *
 * @author Stuart Lewis
 */
public class MetadataImportInvalidHeadingException extends Exception
{
    /** Serialization version id (exceptions are Serializable) */
    private static final long serialVersionUID = 1L;

    /** The type of error (SCHEMA or ELEMENT) */
    private final int type;

    /** The heading that caused the error */
    private final String badHeading;

    /** Error with the schema */
    public static final int SCHEMA = 0;

    /** Error with the element */
    public static final int ELEMENT = 1;

    /**
     * Instantiate a new MetadataImportInvalidHeadingException
     *
     * @param message the invalid heading (also used as the base message)
     * @param theType the type of the error (SCHEMA or ELEMENT)
     */
    public MetadataImportInvalidHeadingException(String message, int theType)
    {
        super(message);
        badHeading = message;
        type = theType;
    }

    /**
     * Get the type of the exception
     *
     * @return the type of the exception as a String ("0" = schema, "1" = element)
     */
    public String getType()
    {
        return "" + type;
    }

    /**
     * Get the heading that was invalid
     *
     * @return the invalid heading
     */
    public String getBadHeader()
    {
        return badHeading;
    }

    /**
     * Get the exception message
     *
     * @return The exception message describing the invalid heading
     */
    @Override
    public String getMessage()
    {
        if (type == SCHEMA)
        {
            return "Unknown metadata schema in heading: " + badHeading;
        }
        // Any non-schema error is reported as an unknown element
        return "Unknown metadata element in heading: " + badHeading;
    }
}
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.bulkedit;
import org.apache.commons.cli.*;
import org.dspace.content.*;
import org.dspace.core.Context;
import org.dspace.core.Constants;
import org.dspace.authorize.AuthorizeException;
import org.dspace.handle.HandleManager;
import org.dspace.eperson.EPerson;
import org.dspace.workflow.WorkflowManager;
import java.util.ArrayList;
import java.io.File;
import java.io.InputStreamReader;
import java.io.BufferedReader;
import java.io.IOException;
import java.sql.SQLException;
import java.util.List;
/**
* Metadata importer to allow the batch import of metadata from a file
*
* @author Stuart Lewis
*/
public class MetadataImport
{
/** The Context */
Context c;
/** The lines to import */
List<DSpaceCSVLine> toImport;
/**
* Create an instance of the metadata importer. Requires a context and an array of CSV lines
* to examine.
*
* @param c The context
* @param toImport An array of CSV lines to examine
*/
public MetadataImport(Context c, List<DSpaceCSVLine> toImport)
{
// Store the import settings
this.c = c;
this.toImport = toImport;
}
    /**
     * Run an import. The import can either be read-only to detect changes, or
     * can write changes as it goes.
     *
     * Each CSV line is processed in turn: lines with a numeric id are compared
     * against the existing item (and optionally updated), while lines whose id
     * was '+' describe a brand new item to create in the given collection(s).
     *
     * @param change Whether or not to write the changes to the database
     * @param useWorkflow Whether the workflows should be used when creating new items
     * @param workflowNotify If the workflows should be used, whether to send notifications or not
     * @param useTemplate Use collection template if create new item
     * @return An array of BulkEditChange elements representing the items that have changed
     *
     * @throws MetadataImportException if something goes wrong
     */
    public List<BulkEditChange> runImport(boolean change,
                                          boolean useWorkflow,
                                          boolean workflowNotify,
                                          boolean useTemplate) throws MetadataImportException
    {
        // Store the changes
        ArrayList<BulkEditChange> changes = new ArrayList<BulkEditChange>();

        // Make the changes
        try
        {
            // Process each change
            for (DSpaceCSVLine line : toImport)
            {
                // Get the DSpace item to compare with
                int id = line.getID();

                // Is this an existing item? (id is -1 for new items)
                if (id != -1)
                {
                    // Get the item
                    Item item = Item.find(c, id);
                    if (item == null)
                    {
                        throw new MetadataImportException("Unknown item ID " + id);
                    }
                    BulkEditChange whatHasChanged = new BulkEditChange(item);

                    // Has it moved collection?
                    List<String> collections = line.get("collection");
                    if (collections != null)
                    {
                        // Sanity check we're not orphaning it
                        if (collections.size() == 0)
                        {
                            throw new MetadataImportException("Missing collection from item " + item.getHandle());
                        }
                        Collection[] actualCollections = item.getCollections();
                        compare(item, collections, actualCollections, whatHasChanged, change);
                    }

                    // Iterate through each metadata element in the csv line
                    for (String md : line.keys())
                    {
                        // 'id' is not a metadata element, so skip it
                        if (!"id".equals(md))
                        {
                            // Get the values from the CSV
                            String[] fromCSV = line.get(md).toArray(new String[line.get(md).size()]);

                            // Compare against the item (optionally applying
                            // any differences found)
                            compare(item, fromCSV, change, md, whatHasChanged);
                        }
                    }

                    // Only record if changes have been made
                    if (whatHasChanged.hasChanges())
                    {
                        changes.add(whatHasChanged);
                    }
                }
                else
                {
                    // This is marked as a new item, so no need to compare

                    // First check a user is set, otherwise this can't happen
                    if (c.getCurrentUser() == null)
                    {
                        throw new MetadataImportException("When adding new items, a user must be specified with the -e option");
                    }

                    // Iterate through each metadata element in the csv line
                    BulkEditChange whatHasChanged = new BulkEditChange();
                    for (String md : line.keys())
                    {
                        // 'id' is not a metadata element, so skip it
                        if (!"id".equals(md))
                        {
                            // Get the values from the CSV
                            String[] fromCSV = line.get(md).toArray(new String[line.get(md).size()]);

                            // Add all the values from the CSV line
                            add(fromCSV, md, whatHasChanged);
                        }
                    }

                    // Check it has an owning collection
                    List<String> collections = line.get("collection");
                    if (collections == null)
                    {
                        throw new MetadataImportException("New items must have a 'collection' assigned in the form of a handle");
                    }

                    // Check collections are really collections
                    ArrayList<Collection> check = new ArrayList<Collection>();
                    Collection collection;
                    for (String handle : collections)
                    {
                        try
                        {
                            // Resolve the handle to the collection
                            collection = (Collection)HandleManager.resolveToObject(c, handle);

                            // Check it resolved OK
                            if (collection == null)
                            {
                                throw new MetadataImportException("'" + handle + "' is not a Collection! You must specify a valid collection for new items");
                            }

                            // Check for duplicate
                            if (check.contains(collection))
                            {
                                throw new MetadataImportException("Duplicate collection assignment detected in new item! " + handle);
                            }
                            else
                            {
                                check.add(collection);
                            }
                        }
                        catch (Exception ex)
                        {
                            // A bad handle (e.g. a ClassCastException from the
                            // cast above) is reported as an invalid collection
                            throw new MetadataImportException("'" + handle + "' is not a Collection! You must specify a valid collection for new items", ex);
                        }
                    }

                    // Record the addition to collections: the first handle is
                    // the owning collection, the rest are mapped collections
                    boolean first = true;
                    for (String handle : collections)
                    {
                        Collection extra = (Collection)HandleManager.resolveToObject(c, handle);
                        if (first)
                        {
                            whatHasChanged.setOwningCollection(extra);
                        }
                        else
                        {
                            whatHasChanged.registerNewMappedCollection(extra);
                        }
                        first = false;
                    }

                    // Create the new item?
                    if (change)
                    {
                        // Create the item in its owning collection
                        String collectionHandle = line.get("collection").get(0);
                        collection = (Collection)HandleManager.resolveToObject(c, collectionHandle);
                        WorkspaceItem wsItem = WorkspaceItem.create(c, collection, useTemplate);
                        Item item = wsItem.getItem();

                        // Add the metadata to the item
                        for (DCValue dcv : whatHasChanged.getAdds())
                        {
                            item.addMetadata(dcv.schema,
                                             dcv.element,
                                             dcv.qualifier,
                                             dcv.language,
                                             dcv.value);
                        }

                        // Should the workflow be used?
                        if ((useWorkflow) && (workflowNotify))
                        {
                            WorkflowManager.start(c, wsItem);
                        }
                        else if (useWorkflow)
                        {
                            WorkflowManager.startWithoutNotify(c, wsItem);
                        }
                        else
                        {
                            // Install the item directly, bypassing workflow
                            InstallItem.installItem(c, wsItem);
                        }

                        // Add to any extra (mapped) collections
                        if (line.get("collection").size() > 0)
                        {
                            for (int i = 1; i < collections.size(); i++)
                            {
                                String handle = collections.get(i);
                                Collection extra = (Collection)HandleManager.resolveToObject(c, handle);
                                extra.addItem(item);
                            }
                        }

                        // Commit changes to the object
                        c.commit();
                        whatHasChanged.setItem(item);
                    }

                    // Record the changes
                    changes.add(whatHasChanged);
                }
            }
        }
        catch (MetadataImportException mie)
        {
            throw mie;
        }
        catch (Exception e)
        {
            // NOTE(review): other exceptions are printed and swallowed here,
            // so a partial set of changes can be returned silently - consider
            // wrapping in a MetadataImportException instead
            e.printStackTrace();
        }

        // Return the changes
        return changes;
    }
    /**
     * Compare an item's metadata with a line from CSV, and optionally update the item.
     *
     * Values on the item but absent from the CSV are registered as removals;
     * values in the CSV but not on the item are registered as additions;
     * everything else is registered as unchanged.
     *
     * @param item The current item metadata
     * @param fromCSV The metadata values from the CSV file for this element
     * @param change Whether or not to make the update
     * @param md The element to compare (schema.element[.qualifier][[language]])
     * @param changes The changes object to populate
     *
     * @throws SQLException if there is a problem accessing a Collection from the database, from its handle
     * @throws AuthorizeException if there is an authorization problem with permissions
     */
    private void compare(Item item, String[] fromCSV, boolean change,
                         String md, BulkEditChange changes) throws SQLException, AuthorizeException
    {
        // Don't compare collections - they are handled separately
        if ("collection".equals(md))
        {
            return;
        }

        // First, strip off the language tag if it is there
        // (e.g. "dc.title[en]" gives language "en")
        String language = null;
        if (md.contains("["))
        {
            String[] bits = md.split("\\[");
            language = bits[1].substring(0, bits[1].length() - 1);
        }
        String[] bits = md.split("\\.");
        String schema = bits[0];
        String element = bits[1];
        // If there is a language on the element, strip it off
        if (element.contains("["))
        {
            element = element.substring(0, element.indexOf('['));
        }
        String qualifier = null;
        if (bits.length > 2)
        {
            qualifier = bits[2];
            // If there is a language on the qualifier, strip it off
            if (qualifier.contains("["))
            {
                qualifier = qualifier.substring(0, qualifier.indexOf('['));
            }
        }

        // Make a String array of the current values stored in this element
        DCValue[] current = item.getMetadata(schema, element, qualifier, language);
        String[] dcvalues = new String[current.length];
        int i = 0;
        for (DCValue dcv : current)
        {
            dcvalues[i] = dcv.value;
            i++;
        }

        // Compare current->csv: any value on the item that is not in the
        // CSV should be removed; everything else is a constant
        for (String value : dcvalues)
        {
            // Look to see if it should be removed
            DCValue dcv = new DCValue();
            dcv.schema = schema;
            dcv.element = element;
            dcv.qualifier = qualifier;
            dcv.language = language;
            dcv.value = value;

            if ((value != null) && (!"".equals(value)) && (!contains(value, fromCSV)))
            {
                // Remove it
                changes.registerRemove(dcv);
            }
            else
            {
                // Keep it
                changes.registerConstant(dcv);
            }
        }

        // Compare csv->current: any value in the CSV not already on the
        // item should be added
        for (String value : fromCSV)
        {
            // Look to see if it should be added
            DCValue dcv = new DCValue();
            dcv.schema = schema;
            dcv.element = element;
            dcv.qualifier = qualifier;
            dcv.language = language;
            dcv.value = value;

            if ((value != null) && (!"".equals(value)) && (!contains(value, dcvalues)))
            {
                changes.registerAdd(dcv);
            }
        }

        // Update the item if it has changed
        if ((change) &&
            ((changes.getAdds().size() > 0) || (changes.getRemoves().size() > 0)))
        {
            // Get the complete list of values that should now be in the
            // element, matching on schema/element/qualifier/language
            // (a null qualifier or language must match exactly)
            List<DCValue> list = changes.getComplete();
            List<String> values = new ArrayList<String>();
            for (DCValue value : list)
            {
                if ((qualifier == null) && (language == null))
                {
                    if ((schema.equals(value.schema)) &&
                        (element.equals(value.element)) &&
                        (value.qualifier == null) &&
                        (value.language == null))
                    {
                        values.add(value.value);
                    }
                }
                else if (qualifier == null)
                {
                    if ((schema.equals(value.schema)) &&
                        (element.equals(value.element)) &&
                        (language.equals(value.language)) &&
                        (value.qualifier == null))
                    {
                        values.add(value.value);
                    }
                }
                else if (language == null)
                {
                    if ((schema.equals(value.schema)) &&
                        (element.equals(value.element)) &&
                        (qualifier.equals(value.qualifier)) &&
                        (value.language == null))
                    {
                        values.add(value.value);
                    }
                }
                else
                {
                    if ((schema.equals(value.schema)) &&
                        (element.equals(value.element)) &&
                        (qualifier.equals(value.qualifier)) &&
                        (language.equals(value.language)))
                    {
                        values.add(value.value);
                    }
                }
            }

            // Set those values: clear the element, then re-add the full set
            item.clearMetadata(schema, element, qualifier, language);
            String[] theValues = values.toArray(new String[values.size()]);
            item.addMetadata(schema, element, qualifier, language, theValues);
            item.update();
        }
    }
    /**
     * Compare changes between an item's owning collection and mapped collections
     * and what is in the CSV file.
     *
     * The first handle in the CSV list is always treated as the owning
     * collection; any further handles are mapped collections.
     *
     * @param item The item in question
     * @param collections The collection handles from the CSV file
     * @param actualCollections The Collections from the actual item
     * @param bechange The bulkedit change object for this item
     * @param change Whether or not to actuate a change
     *
     * @throws SQLException if there is a problem accessing a Collection from the database, from its handle
     * @throws AuthorizeException if there is an authorization problem with permissions
     * @throws IOException Can be thrown when moving items in communities
     * @throws MetadataImportException If something goes wrong to be reported back to the user
     */
    private void compare(Item item,
                         List<String> collections,
                         Collection[] actualCollections,
                         BulkEditChange bechange,
                         boolean change) throws SQLException, AuthorizeException, IOException, MetadataImportException
    {
        // First, check whether the owning collection (as opposed to mapped
        // collections) is the same or has changed
        String oldOwner = item.getOwningCollection().getHandle();
        String newOwner = collections.get(0);

        // Resolve the handle to the collection
        Collection newCollection = (Collection)HandleManager.resolveToObject(c, newOwner);

        // Check it resolved OK
        if (newCollection == null)
        {
            throw new MetadataImportException("'" + newOwner + "' is not a Collection! You must specify a valid collection ID");
        }

        if (!oldOwner.equals(newOwner))
        {
            // Register the old and new owning collections
            bechange.changeOwningCollection(item.getOwningCollection(), (Collection)HandleManager.resolveToObject(c, newOwner));
        }

        // Second, loop through the strings from the CSV of mapped collections
        boolean first = true;
        for (String csvcollection : collections)
        {
            // Ignore the first collection as this is the owning collection
            if (!first)
            {
                // Look for it in the actual list of Collections
                boolean found = false;
                for (Collection collection : actualCollections)
                {
                    // The owning collection does not count as a mapping
                    if (collection.getID() != item.getOwningCollection().getID()) {
                        // Is it there?
                        if (csvcollection.equals(collection.getHandle()))
                        {
                            found = true;
                        }
                    }
                }

                // Check the handle really refers to a collection
                DSpaceObject dso = HandleManager.resolveToObject(c, csvcollection);
                if ((dso == null) || (dso.getType() != Constants.COLLECTION))
                {
                    throw new MetadataImportException("Collection defined for item " + item.getID() +
                                                      " (" + item.getHandle() + ") is not a collection");
                }
                if (!found)
                {
                    // Register the new mapped collection
                    Collection col = (Collection)dso;
                    bechange.registerNewMappedCollection(col);
                }
            }
            first = false;
        }

        // Third, loop through the collections currently on the item, looking
        // for mappings that no longer appear in the CSV
        for (Collection collection : actualCollections)
        {
            // Look for it in the list of handles from the CSV
            boolean found = false;
            first = true;
            for (String csvcollection : collections)
            {
                // Don't check the owning collection
                if ((first) && (collection.getID() == item.getOwningCollection().getID()))
                {
                    found = true;
                }
                else
                {
                    // Is it there?
                    if (!first && collection.getHandle().equals(csvcollection))
                    {
                        found = true;
                    }
                }
                first = false;
            }

            // Was it found?
            if (!found)
            {
                // Record that it isn't there any more
                bechange.registerOldMappedCollection(collection);
            }
        }

        // Process the changes
        if (change)
        {
            // Remove old mapped collections
            for (Collection c : bechange.getOldMappedCollections())
            {
                c.removeItem(item);
            }

            // Add to the new owning collection and update the item's owner
            if (bechange.getNewOwningCollection() != null)
            {
                bechange.getNewOwningCollection().addItem(item);
                item.setOwningCollection(bechange.getNewOwningCollection());
                item.update();
            }

            // Remove from old owning collection (if still a member)
            if (bechange.getOldOwningCollection() != null)
            {
                boolean found = false;
                for (Collection c : item.getCollections())
                {
                    if (c.getID() == bechange.getOldOwningCollection().getID())
                    {
                        found = true;
                    }
                }
                if (found)
                {
                    bechange.getOldOwningCollection().removeItem(item);
                }
            }

            // Add to new mapped collections
            for (Collection c : bechange.getNewMappedCollections())
            {
                c.addItem(item);
            }
        }
    }
/**
* Add an item metadata with a line from CSV, and optionally update the item
*
* @param fromCSV The metadata from the CSV file
* @param md The element to compare
* @param changes The changes object to populate
*
* @throws SQLException when an SQL error has occured (querying DSpace)
* @throws AuthorizeException If the user can't make the changes
*/
private void add(String[] fromCSV, String md, BulkEditChange changes)
throws SQLException, AuthorizeException
{
// Don't add owning collection
if ("collection".equals(md))
{
return;
}
// Make a String array of the values
// First, strip of language if it is there
String language = null;
if (md.contains("["))
{
String[] bits = md.split("\\[");
language = bits[1].substring(0, bits[1].length() - 1);
}
String[] bits = md.split("\\.");
String schema = bits[0];
String element = bits[1];
// If there is a language on the element, strip if off
if (element.contains("["))
{
element = element.substring(0, element.indexOf('['));
}
String qualifier = null;
if (bits.length > 2)
{
qualifier = bits[2];
// If there is a language, strip if off
if (qualifier.contains("["))
{
qualifier = qualifier.substring(0, qualifier.indexOf('['));
}
}
// Add all the values
for (String value : fromCSV)
{
// Look to see if it should be removed
DCValue dcv = new DCValue();
dcv.schema = schema;
dcv.element = element;
dcv.qualifier = qualifier;
dcv.language = language;
dcv.value = value;
// Add it
if ((value != null) && (!"".equals(value)))
{
changes.registerAdd(dcv);
}
}
}
/**
* Method to find if a String occurs in an array of Strings
*
* @param needle The String to look for
* @param haystack The array of Strings to search through
* @return Whether or not it is contained
*/
private boolean contains(String needle, String[] haystack)
{
// Look for the needle in the haystack
for (String examine : haystack)
{
if (clean(examine).equals(clean(needle)))
{
return true;
}
}
return false;
}
/**
* Clean elements before comparing
*
* @param in The element to clean
* @return The cleaned up element
*/
private String clean(String in)
{
// Check for nulls
if (in == null)
{
return null;
}
// Remove newlines as different operating systems sometimes use different formats
return in.replaceAll("\r\n", "").replaceAll("\n", "").trim();
}
/**
* Print the help message
*
* @param options The command line options the user gave
* @param exitCode the system exit code to use
*/
private static void printHelp(Options options, int exitCode)
{
// print the help message
HelpFormatter myhelp = new HelpFormatter();
myhelp.printHelp("MetatadataImport\n", options);
System.out.println("\nmetadataimport: MetadataImport -f filename");
System.exit(exitCode);
}
/**
* Display the changes that have been detected, or that have been made
*
* @param changes The changes detected
* @param changed Whether or not the changes have been made
* @return The number of items that have changed
*/
private static int displayChanges(List<BulkEditChange> changes, boolean changed)
{
// Display the changes
int changeCounter = 0;
for (BulkEditChange change : changes)
{
// Get the changes
List<DCValue> adds = change.getAdds();
List<DCValue> removes = change.getRemoves();
List<Collection> newCollections = change.getNewMappedCollections();
List<Collection> oldCollections = change.getOldMappedCollections();
if ((adds.size() > 0) || (removes.size() > 0) ||
(newCollections.size() > 0) || (oldCollections.size() > 0) ||
(change.getNewOwningCollection() != null) || (change.getOldOwningCollection() != null))
{
// Show the item
Item i = change.getItem();
System.out.println("-----------------------------------------------------------");
if (!change.isNewItem())
{
System.out.println("Changes for item: " + i.getID() + " (" + i.getHandle() + ")");
}
else
{
System.out.print("New item: ");
if (i != null)
{
if (i.getHandle() != null)
{
System.out.print(i.getID() + " (" + i.getHandle() + ")");
}
else
{
System.out.print(i.getID() + " (in workflow)");
}
}
System.out.println();
}
changeCounter++;
}
if (change.getNewOwningCollection() != null)
{
Collection c = change.getNewOwningCollection();
if (c != null)
{
String cHandle = c.getHandle();
String cName = c.getName();
if (!changed)
{
System.out.print(" + New owning collection (" + cHandle + "): ");
}
else
{
System.out.print(" + New owning collection (" + cHandle + "): ");
}
System.out.println(cName);
}
c = change.getOldOwningCollection();
if (c != null)
{
String cHandle = c.getHandle();
String cName = c.getName();
if (!changed)
{
System.out.print(" + Old owning collection (" + cHandle + "): ");
}
else
{
System.out.print(" + Old owning collection (" + cHandle + "): ");
}
System.out.println(cName);
}
}
// Show new mapped collections
for (Collection c : newCollections)
{
String cHandle = c.getHandle();
String cName = c.getName();
if (!changed)
{
System.out.print(" + Map to collection (" + cHandle + "): ");
}
else
{
System.out.print(" + Mapped to collection (" + cHandle + "): ");
}
System.out.println(cName);
}
// Show old mapped collections
for (Collection c : oldCollections)
{
String cHandle = c.getHandle();
String cName = c.getName();
if (!changed)
{
System.out.print(" + Um-map from collection (" + cHandle + "): ");
}
else
{
System.out.print(" + Un-mapped from collection (" + cHandle + "): ");
}
System.out.println(cName);
}
// Show additions
for (DCValue dcv : adds)
{
String md = dcv.schema + "." + dcv.element;
if (dcv.qualifier != null)
{
md += "." + dcv.qualifier;
}
if (dcv.language != null)
{
md += "[" + dcv.language + "]";
}
if (!changed)
{
System.out.print(" + Add (" + md + "): ");
}
else
{
System.out.print(" + Added (" + md + "): ");
}
System.out.println(dcv.value);
}
// Show removals
for (DCValue dcv : removes)
{
String md = dcv.schema + "." + dcv.element;
if (dcv.qualifier != null)
{
md += "." + dcv.qualifier;
}
if (dcv.language != null)
{
md += "[" + dcv.language + "]";
}
if (!changed)
{
System.out.println(" - Remove (" + md + "): " + dcv.value);
}
else
{
System.out.println(" - Removed (" + md + "): " + dcv.value);
}
}
}
return changeCounter;
}
/**
 * Main method to run the metadata importer from the command line.
 *
 * Parses the command-line options, reads the source CSV file, performs a
 * dry run to display the proposed changes, asks the user for confirmation
 * (unless -s / silent is given), then applies the changes and commits them
 * to the database. Exits with status 1 on any error.
 *
 * @param argv the command line arguments given
 */
public static void main(String[] argv)
{
    // Create an options object and populate it
    CommandLineParser parser = new PosixParser();
    Options options = new Options();
    options.addOption("f", "file", true, "source file");
    options.addOption("e", "email", true, "email address or user id of user (required if adding new items)");
    options.addOption("s", "silent", false, "silent operation - doesn't request confirmation of changes USE WITH CAUTION");
    options.addOption("w", "workflow", false, "workflow - when adding new items, use collection workflow");
    options.addOption("n", "notify", false, "notify - when adding new items using a workflow, send notification emails");
    options.addOption("t", "template", false, "template - when adding new items, use the collection template (if it exists)");
    options.addOption("h", "help", false, "help");
    // Parse the command line arguments
    CommandLine line;
    try
    {
        line = parser.parse(options, argv);
    }
    catch (ParseException pe)
    {
        System.err.println("Error parsing command line arguments: " + pe.getMessage());
        System.exit(1);
        return; // unreachable after exit, but satisfies definite assignment of 'line'
    }
    if (line.hasOption('h'))
    {
        // printHelp exits with the given status code
        printHelp(options, 0);
    }
    // Check a filename is given
    if (!line.hasOption('f'))
    {
        System.err.println("Required parameter -f missing!");
        printHelp(options, 1);
    }
    String filename = line.getOptionValue('f');
    // Option to apply template to new items
    boolean useTemplate = false;
    if (line.hasOption('t'))
    {
        useTemplate = true;
    }
    // Options for workflows, and workflow notifications for new items.
    // -n (notify) is only valid together with -w (workflow).
    boolean useWorkflow = false;
    boolean workflowNotify = false;
    if (line.hasOption('w'))
    {
        useWorkflow = true;
        if (line.hasOption('n'))
        {
            workflowNotify = true;
        }
    }
    else if (line.hasOption('n'))
    {
        System.err.println("Invalid option 'n': (notify) can only be specified with the 'w' (workflow) option.");
        System.exit(1);
    }
    // Create a context (authorisation is turned off: this is an
    // administrative batch tool)
    Context c;
    try
    {
        c = new Context();
        c.turnOffAuthorisationSystem();
    }
    catch (Exception e)
    {
        System.err.println("Unable to create a new DSpace Context: " + e.getMessage());
        System.exit(1);
        return; // unreachable after exit, but satisfies definite assignment of 'c'
    }
    // Find the EPerson, assign to context.
    // The -e value may be either an email address (contains '@') or a
    // numeric eperson id.
    try
    {
        if (line.hasOption('e'))
        {
            EPerson eperson;
            String e = line.getOptionValue('e');
            if (e.indexOf('@') != -1)
            {
                eperson = EPerson.findByEmail(c, e);
            }
            else
            {
                eperson = EPerson.find(c, Integer.parseInt(e));
            }
            if (eperson == null)
            {
                System.out.println("Error, eperson cannot be found: " + e);
                System.exit(1);
            }
            c.setCurrentUser(eperson);
        }
    } catch (Exception e)
    {
        System.err.println("Unable to find DSpace user: " + e.getMessage());
        System.exit(1);
        return;
    }
    // Is this a silent run? 'change' becomes true once the user confirms
    // (or immediately when -s is given).
    boolean change = false;
    // Read lines from the CSV file
    DSpaceCSV csv;
    try
    {
        csv = new DSpaceCSV(new File(filename), c);
    }
    catch (Exception e)
    {
        System.err.println("Error reading file: " + e.getMessage());
        System.exit(1);
        return;
    }
    // Perform the first import - just highlight differences (dry run)
    MetadataImport importer = new MetadataImport(c, csv.getCSVLines());
    List<BulkEditChange> changes;
    if (!line.hasOption('s'))
    {
        // See what has changed
        try
        {
            // change=false: report only, make no modifications yet
            changes = importer.runImport(false, useWorkflow, workflowNotify, useTemplate);
        }
        catch (MetadataImportException mie)
        {
            System.err.println("Error: " + mie.getMessage());
            System.exit(1);
            return;
        }
        // Display the changes
        int changeCounter = displayChanges(changes, false);
        // If there were changes, ask if we should execute them
        if (changeCounter > 0)
        {
            try
            {
                // Ask the user if they want to make the changes
                System.out.println("\n" + changeCounter + " item(s) will be changed\n");
                System.out.print("Do you want to make these changes? [y/n] ");
                String yn = (new BufferedReader(new InputStreamReader(System.in))).readLine();
                if ("y".equalsIgnoreCase(yn))
                {
                    change = true;
                }
                else
                {
                    System.out.println("No data has been changed.");
                }
            }
            catch (IOException ioe)
            {
                System.err.println("Error: " + ioe.getMessage());
                System.err.println("No changes have been made");
                System.exit(1);
            }
        }
        else
        {
            System.out.println("There were no changes detected");
        }
    }
    else
    {
        // Silent mode: apply without confirmation
        change = true;
    }
    try
    {
        // If required, make the change
        if (change)
        {
            try
            {
                // Make the changes (second pass, change=true applies them)
                changes = importer.runImport(true, useWorkflow, workflowNotify, useTemplate);
            }
            catch (MetadataImportException mie)
            {
                System.err.println("Error: " + mie.getMessage());
                System.exit(1);
                return;
            }
            // Display the changes
            displayChanges(changes, true);
            // Commit the change to the DB
            c.commit();
        }
        // Finish off and tidy up
        c.restoreAuthSystemState();
        c.complete();
    }
    catch (Exception e)
    {
        // Roll back the most recent (uncommitted) changes
        c.abort();
        System.err.println("Error commiting changes to database: " + e.getMessage());
        System.err.println("Aborting most recent changes.");
        System.exit(1);
    }
}
} | Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.bulkedit;
import org.dspace.content.Item;
import org.dspace.content.DCValue;
import org.dspace.content.Collection;
import java.util.ArrayList;
import java.util.List;
/**
* Utility class to store changes to item that may occur during a batch edit.
*
* @author Stuart Lewis
*/
public class BulkEditChange
{
/** The item these changes relate to */
private Item item;
/** The List of hashtables with the new elements */
private List<DCValue> adds;
/** The List of hashtables with the removed elements */
private List<DCValue> removes;
/** The List of hashtables with the unchanged elements */
private List<DCValue> constant;
/** The List of the complete set of new values (constant + adds) */
private List<DCValue> complete;
/** The list of old collections the item used to be mapped to */
private List<Collection> oldMappedCollections;
/** The list of new collections the item has been mapped into */
private List<Collection> newMappedCollections;
/** The old owning collection */
private Collection oldOwningCollection;
/** The new owning collection */
private Collection newOwningCollection;
/** Is this a new item */
private boolean newItem;
/** Have any changes actually been made? */
private boolean empty;
/**
* Initialise a change holder for a new item
*/
public BulkEditChange()
{
// Set the item to be null
item = null;
newItem = true;
empty = true;
oldOwningCollection = null;
newOwningCollection = null;
// Initialise the arrays
adds = new ArrayList<DCValue>();
removes = new ArrayList<DCValue>();
constant = new ArrayList<DCValue>();
complete = new ArrayList<DCValue>();
oldMappedCollections = new ArrayList<Collection>();
newMappedCollections = new ArrayList<Collection>();
}
/**
* Initialise a new change holder for an existing item
*
* @param i The Item to store
*/
public BulkEditChange(Item i)
{
// Store the item
item = i;
newItem = false;
empty = true;
// Initialise the arrays
adds = new ArrayList<DCValue>();
removes = new ArrayList<DCValue>();
constant = new ArrayList<DCValue>();
complete = new ArrayList<DCValue>();
oldMappedCollections = new ArrayList<Collection>();
newMappedCollections = new ArrayList<Collection>();
}
/**
* Store the item - used when a new item is created
*
* @param i The item
*/
public void setItem(Item i)
{
// Store the item
item = i;
}
/**
* Add an added metadata value
*
* @param dcv The value to add
*/
public void registerAdd(DCValue dcv)
{
// Add the added value
adds.add(dcv);
complete.add(dcv);
empty = false;
}
/**
* Add a removed metadata value
*
* @param dcv The value to remove
*/
public void registerRemove(DCValue dcv)
{
// Add the removed value
removes.add(dcv);
empty = false;
}
/**
* Add an unchanged metadata value
*
* @param dcv The value to keep unchanged
*/
public void registerConstant(DCValue dcv)
{
// Add the removed value
constant.add(dcv);
complete.add(dcv);
}
/**
* Add a new mapped Collection
*
* @param c The new mapped Collection
*/
public void registerNewMappedCollection(Collection c)
{
// Add the new owning Collection
newMappedCollections.add(c);
empty = false;
}
/**
* Add an old mapped Collection
*
* @param c The old mapped Collection
*/
public void registerOldMappedCollection(Collection c)
{
// Add the old owning Collection (if it isn't there already, or is an old collection)
boolean found = false;
if ((this.getOldOwningCollection() != null) &&
(this.getOldOwningCollection().getHandle().equals(c.getHandle())))
{
found = true;
}
for (Collection collection : oldMappedCollections)
{
if (collection.getHandle().equals(c.getHandle()))
{
found = true;
}
}
if (!found)
{
oldMappedCollections.add(c);
empty = false;
}
}
/**
* Register a change to the owning collection
*
* @param oldC The old owning collection
* @param newC The new owning collection
*/
public void changeOwningCollection(Collection oldC, Collection newC)
{
// Store the old owning collection
oldOwningCollection = oldC;
// Store the new owning collection
newOwningCollection = newC;
empty = false;
}
/**
* Set the owning collection of an item
*
* @param newC The new owning collection
*/
public void setOwningCollection(Collection newC)
{
// Store the new owning collection
newOwningCollection = newC;
//empty = false;
}
/**
* Get the DSpace Item that these changes are applicable to.
*
* @return The item
*/
public Item getItem()
{
// Return the item
return item;
}
/**
* Get the list of elements and their values that have been added.
*
* @return the list of elements and their values that have been added.
*/
public List<DCValue> getAdds()
{
// Return the array
return adds;
}
/**
* Get the list of elements and their values that have been removed.
*
* @return the list of elements and their values that have been removed.
*/
public List<DCValue> getRemoves()
{
// Return the array
return removes;
}
/**
* Get the list of unchanged values
*
* @return the list of unchanged values
*/
public List<DCValue> getConstant()
{
// Return the array
return constant;
}
/**
* Get the list of all values
*
* @return the list of all values
*/
public List<DCValue> getComplete()
{
// Return the array
return complete;
}
/**
* Get the list of new mapped Collections
*
* @return the list of new mapped collections
*/
public List<Collection> getNewMappedCollections()
{
// Return the array
return newMappedCollections;
}
/**
* Get the list of old mapped Collections
*
* @return the list of old mapped collections
*/
public List<Collection> getOldMappedCollections()
{
// Return the array
return oldMappedCollections;
}
/**
* Get the old owning collection
*
* @return the old owning collection
*/
public Collection getOldOwningCollection()
{
// Return the old owning collection
return oldOwningCollection;
}
/**
* Get the new owning collection
*
* @return the new owning collection
*/
public Collection getNewOwningCollection()
{
// Return the new owning collection
return newOwningCollection;
}
/**
* Does this change object represent a new item?
*
* @return Whether or not this is for a new item
*/
public boolean isNewItem()
{
// Return the new item status
return newItem;
}
/**
* Have any changes actually been recorded, or is this empty?
*
* @return Whether or not changes have been made
*/
public boolean hasChanges()
{
return !empty;
}
} | Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.bulkedit;
/**
 * Exception thrown when the bulk metadata importer encounters a fatal problem
 * with the input data or the import process.
 *
 * @author Stuart Lewis
 */
public class MetadataImportException extends Exception
{
    /** Explicit serial version id: Exception is Serializable, and declaring
        this avoids compiler-generated ids changing across builds. */
    private static final long serialVersionUID = 1L;

    /**
     * Instantiate a new MetadataImportException.
     *
     * @param message the error message
     */
    public MetadataImportException(String message)
    {
        super(message);
    }

    /**
     * Instantiate a new MetadataImportException wrapping a root cause.
     *
     * @param message the error message
     * @param exception the root cause
     */
    public MetadataImportException(String message, Exception exception)
    {
        super(message, exception);
    }
}
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.bulkedit;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
 * Utility class to store a single line (one item) from a DSpace CSV file.
 * Holds the item id plus a map of metadata field -&gt; list of values.
 *
 * @author Stuart Lewis
 */
public class DSpaceCSVLine
{
    /** The item id of the item represented by this line. -1 is for a new item */
    private int id;
    /** The values in this line, keyed by the metadata field (e.g. dc.title) */
    private Map<String, List<String>> items;
    /**
     * Create a new CSV line.
     *
     * @param itemId The item ID of the line
     */
    public DSpaceCSVLine(int itemId)
    {
        // Store the ID, and initialise the value map
        this.id = itemId;
        items = new HashMap<String, List<String>>();
    }
    /**
     * Create a new CSV line for a new item (ID of -1).
     */
    public DSpaceCSVLine()
    {
        // Set the ID to be -1, and initialise the value map
        this.id = -1;
        this.items = new HashMap<String, List<String>>();
    }
    /**
     * Get the item ID that this line represents.
     *
     * @return The item ID (-1 for a new item)
     */
    public int getID()
    {
        return id;
    }
    /**
     * Add a new metadata value to this line. A null value registers the key
     * without storing a value.
     *
     * @param key The metadata key (e.g. dc.contributor.author)
     * @param value The metadata value
     */
    public void add(String key, String value)
    {
        // Create the value list on first sight of this key
        if (items.get(key) == null)
        {
            items.put(key, new ArrayList<String>());
        }
        // Store the value if it is not null
        if (value != null)
        {
            items.get(key).add(value);
        }
    }
    /**
     * Get all the values that match the given metadata key. Will be null if none exist.
     *
     * @param key The metadata key
     * @return All the elements that match
     */
    public List<String> get(String key)
    {
        return items.get(key);
    }
    /**
     * Get all the metadata keys that are represented in this line.
     *
     * @return The set of all the keys
     */
    public Set<String> keys()
    {
        return items.keySet();
    }
    /**
     * Write this line out as a CSV formatted string, in the order given by
     * the headings provided. The id and collection fields always come first.
     *
     * @param headings The headings which define the order the elements must be presented in
     * @return The CSV formatted String
     */
    protected String toCSV(List<String> headings)
    {
        StringBuilder bits = new StringBuilder();
        // Add the id, then the collection mapping
        bits.append("\"").append(id).append("\"").append(DSpaceCSV.fieldSeparator);
        bits.append(valueToCSV(items.get("collection")));
        // Add the rest of the elements ("collection" was emitted above, so it
        // is skipped here to avoid duplication)
        for (String heading : headings)
        {
            bits.append(DSpaceCSV.fieldSeparator);
            List<String> values = items.get(heading);
            if (values != null && !"collection".equals(heading))
            {
                bits.append(valueToCSV(values));
            }
        }
        return bits.toString();
    }
    /**
     * Internal method to create a CSV formatted String joining a given set of elements.
     *
     * @param values The values to create the string from
     * @return The line as a CSV formatted String
     */
    protected String valueToCSV(List<String> values)
    {
        // Check there is some content
        if (values == null)
        {
            return "";
        }
        // Join multiple values with the configured value separator
        String s;
        if (values.size() == 1)
        {
            s = values.get(0);
        }
        else
        {
            StringBuilder str = new StringBuilder();
            for (String value : values)
            {
                if (str.length() > 0)
                {
                    str.append(DSpaceCSV.valueSeparator);
                }
                str.append(value);
            }
            s = str.toString();
        }
        // Escape internal quotes by doubling them. replace() is used rather
        // than replaceAll() because this is a literal, not a regex.
        return "\"" + s.replace("\"", "\"\"") + "\"";
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.harvest;
import java.io.IOException;
import java.sql.SQLException;
import java.util.List;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.PosixParser;
import org.dspace.authorize.AuthorizeException;
import org.dspace.browse.IndexBrowse;
import org.dspace.content.Collection;
import org.dspace.content.DSpaceObject;
import org.dspace.harvest.HarvestedCollection;
import org.dspace.content.Item;
import org.dspace.content.ItemIterator;
import org.dspace.harvest.OAIHarvester;
import org.dspace.harvest.OAIHarvester.HarvestingException;
import org.dspace.core.Constants;
import org.dspace.core.Context;
import org.dspace.eperson.EPerson;
import org.dspace.handle.HandleManager;
/**
 * Command line tool for managing harvested collections: configure, purge,
 * run and reset OAI-PMH harvests, and start the harvest scheduler.
 *
 * @author Alexey Maslov
 */
public class Harvest
{
    /** Shared DSpace context used by every command.
        NOTE(review): this static field is assigned in main() and used from
        instance methods, so the class is effectively single-use per JVM. */
    private static Context context;

    public static void main(String[] argv) throws Exception
    {
        // create an options object and populate it
        CommandLineParser parser = new PosixParser();
        Options options = new Options();
        options.addOption("p", "purge", false, "delete all items in the collection");
        options.addOption("r", "run", false, "run the standard harvest procedure");
        options.addOption("g", "ping", false, "test the OAI server and set");
        options.addOption("o", "once", false, "run the harvest procedure with specified parameters");
        options.addOption("s", "setup", false, "Set the collection up for harvesting");
        options.addOption("S", "start", false, "start the harvest loop");
        options.addOption("R", "reset", false, "reset harvest status on all collections");
        // NOTE(review): the long name "purge" collides with -p above; with
        // commons-cli only one option can own a long name. The short forms
        // -p and -P remain unambiguous. Not changed here to preserve the CLI.
        options.addOption("P", "purge", false, "purge all harvestable collections");
        options.addOption("e", "eperson", true, "eperson");
        options.addOption("c", "collection", true, "harvesting collection (handle or id)");
        options.addOption("t", "type", true, "type of harvesting (0 for none)");
        options.addOption("a", "address", true, "address of the OAI-PMH server");
        options.addOption("i", "oai_set_id", true, "id of the PMH set representing the harvested collection");
        options.addOption("m", "metadata_format", true, "the name of the desired metadata format for harvesting, resolved to namespace and crosswalk in dspace.cfg");
        options.addOption("h", "help", false, "help");
        CommandLine line = parser.parse(options, argv);

        // Command to execute, plus its parameters
        String command = null;
        String eperson = null;
        String collection = null;
        String oaiSource = null;
        String oaiSetID = null;
        String metadataKey = null;
        int harvestType = 0;

        if (line.hasOption('h'))
        {
            HelpFormatter myhelp = new HelpFormatter();
            myhelp.printHelp("Harvest\n", options);
            System.out
                    .println("\nPING OAI server: Harvest -g -s oai_source -i oai_set_id");
            System.out
                    .println("RUNONCE harvest with arbitrary options: Harvest -o -e eperson -c collection -t harvest_type -a oai_source -i oai_set_id -m metadata_format");
            System.out
                    .println("SETUP a collection for harvesting: Harvest -s -c collection -t harvest_type -a oai_source -i oai_set_id -m metadata_format");
            System.out
                    .println("RUN harvest once: Harvest -r -e eperson -c collection");
            System.out
                    .println("START harvest scheduler: Harvest -S");
            System.out
                    .println("RESET all harvest status: Harvest -R");
            System.out
                    .println("PURGE a collection of items and settings: Harvest -p -e eperson -c collection");
            System.out
                    .println("PURGE all harvestable collections: Harvest -P -e eperson");
            System.exit(0);
        }

        // Map the flags to a command; later flags win when several are given
        if (line.hasOption('s')) {
            command = "config";
        }
        if (line.hasOption('p')) {
            command = "purge";
        }
        if (line.hasOption('r')) {
            command = "run";
        }
        if (line.hasOption('g')) {
            command = "ping";
        }
        // NOTE(review): "runOnce" is set here but never dispatched in the
        // command chain below, so -o currently does nothing.
        if (line.hasOption('o')) {
            command = "runOnce";
        }
        if (line.hasOption('S')) {
            command = "start";
        }
        if (line.hasOption('R')) {
            command = "reset";
        }
        if (line.hasOption('P')) {
            command = "purgeAll";
        }

        // Gather the parameter values
        if (line.hasOption('e')) {
            eperson = line.getOptionValue('e');
        }
        if (line.hasOption('c')) {
            collection = line.getOptionValue('c');
        }
        if (line.hasOption('t')) {
            harvestType = Integer.parseInt(line.getOptionValue('t'));
        }
        // harvestType defaults to 0 when -t is absent (set at declaration)
        if (line.hasOption('a')) {
            oaiSource = line.getOptionValue('a');
        }
        if (line.hasOption('i')) {
            oaiSetID = line.getOptionValue('i');
        }
        if (line.hasOption('m')) {
            metadataKey = line.getOptionValue('m');
        }

        // Instantiate our class and open the shared context
        Harvest harvester = new Harvest();
        context = new Context();

        // Check our options
        if (command == null)
        {
            System.out
                    .println("Error - no parameters specified (run with -h flag for details)");
            System.exit(1);
        }
        // Run a single harvest cycle on a collection using saved settings.
        else if ("run".equals(command))
        {
            if (collection == null || eperson == null)
            {
                System.out
                        .println("Error - a target collection and eperson must be provided");
                System.out.println(" (run with -h flag for details)");
                System.exit(1);
            }
            harvester.runHarvest(collection, eperson);
        }
        // start the harvest loop
        else if ("start".equals(command))
        {
            startHarvester();
        }
        // reset harvesting status
        else if ("reset".equals(command))
        {
            resetHarvesting();
        }
        // purge all collections that are set up for harvesting (obviously for testing purposes only)
        else if ("purgeAll".equals(command))
        {
            if (eperson == null)
            {
                System.out
                        .println("Error - an eperson must be provided");
                System.out.println(" (run with -h flag for details)");
                System.exit(1);
            }
            List<Integer> cids = HarvestedCollection.findAll(context);
            System.out.println("Purging the following collections (deleting items and resetting harvest status): " + cids.toString());
            for (Integer cid : cids)
            {
                harvester.purgeCollection(cid.toString(), eperson);
            }
            context.complete();
        }
        // Delete all items in a collection. Useful for testing fresh harvests.
        else if ("purge".equals(command))
        {
            if (collection == null || eperson == null)
            {
                System.out
                        .println("Error - a target collection and eperson must be provided");
                System.out.println(" (run with -h flag for details)");
                System.exit(1);
            }
            harvester.purgeCollection(collection, eperson);
            context.complete();
            //TODO: implement this... remove all items and remember to unset "last-harvested" settings
        }
        // Configure a collection with the three main settings
        else if ("config".equals(command))
        {
            if (collection == null)
            {
                System.out.println("Error - a target collection must be provided");
                System.out.println(" (run with -h flag for details)");
                System.exit(1);
            }
            if (oaiSource == null || oaiSetID == null)
            {
                System.out.println("Error - both the OAI server address and OAI set id must be specified");
                System.out.println(" (run with -h flag for details)");
                System.exit(1);
            }
            if (metadataKey == null)
            {
                System.out.println("Error - a metadata key (commonly the prefix) must be specified for this collection");
                System.out.println(" (run with -h flag for details)");
                System.exit(1);
            }
            harvester.configureCollection(collection, harvestType, oaiSource, oaiSetID, metadataKey);
        }
        else if ("ping".equals(command))
        {
            // NOTE(review): only validates the parameters; no actual ping of
            // the OAI server is performed here.
            if (oaiSource == null || oaiSetID == null)
            {
                System.out.println("Error - both the OAI server address and OAI set id must be specified");
                System.out.println(" (run with -h flag for details)");
                System.exit(1);
            }
        }
    }

    /*
     * Resolve the ID into a collection and check to see if its harvesting options are set. If so, return
     * the collection; if not, print an error and exit the JVM.
     */
    private Collection resolveCollection(String collectionID) {
        DSpaceObject dso;
        Collection targetCollection = null;
        try {
            // is the ID a handle?
            if (collectionID != null)
            {
                if (collectionID.indexOf('/') != -1)
                {
                    // string has a / so it must be a handle - try and resolve it
                    dso = HandleManager.resolveToObject(context, collectionID);
                    // resolved, now make sure it's a collection
                    if (dso == null || dso.getType() != Constants.COLLECTION)
                    {
                        targetCollection = null;
                    }
                    else
                    {
                        targetCollection = (Collection) dso;
                    }
                }
                // not a handle, try and treat it as an integer collection database ID
                else
                {
                    System.out.println("Looking up by id: " + collectionID + ", parsed as '" + Integer.parseInt(collectionID) + "', " + "in context: " + context);
                    targetCollection = Collection.find(context, Integer.parseInt(collectionID));
                }
            }
            // was the collection valid?
            if (targetCollection == null)
            {
                System.out.println("Cannot resolve " + collectionID + " to collection");
                System.exit(1);
            }
        }
        catch (SQLException se) {
            se.printStackTrace();
        }
        return targetCollection;
    }

    /**
     * Create or update the harvest settings (type, OAI source, set id and
     * metadata format) for a collection, and mark it ready for harvesting.
     */
    private void configureCollection(String collectionID, int type, String oaiSource, String oaiSetId, String mdConfigId) {
        System.out.println("Running: configure collection");
        Collection collection = resolveCollection(collectionID);
        System.out.println(collection.getID());
        try {
            // Create the harvest settings row if this collection has none yet
            HarvestedCollection hc = HarvestedCollection.find(context, collection.getID());
            if (hc == null) {
                hc = HarvestedCollection.create(context, collection.getID());
            }
            context.turnOffAuthorisationSystem();
            hc.setHarvestParams(type, oaiSource, oaiSetId, mdConfigId);
            hc.setHarvestStatus(HarvestedCollection.STATUS_READY);
            hc.update();
            context.restoreAuthSystemState();
            context.complete();
        }
        catch (Exception e) {
            System.out.println("Changes could not be committed");
            e.printStackTrace();
            System.exit(1);
        }
        finally {
            if (context != null)
            {
                context.restoreAuthSystemState();
            }
        }
    }

    /**
     * Purges a collection of all harvest-related data and settings. All items in the collection will be deleted.
     *
     * @param collectionID handle or database id of the collection
     * @param email email address of the eperson to run the purge as
     */
    private void purgeCollection(String collectionID, String email) {
        System.out.println("Purging collection of all items and resetting last_harvested and harvest_message: " + collectionID);
        Collection collection = resolveCollection(collectionID);
        try
        {
            EPerson eperson = EPerson.findByEmail(context, email);
            context.setCurrentUser(eperson);
            context.turnOffAuthorisationSystem();
            ItemIterator it = collection.getAllItems();
            IndexBrowse ib = new IndexBrowse(context);
            int i=0;
            while (it.hasNext()) {
                i++;
                Item item = it.next();
                System.out.println("Deleting: " + item.getHandle());
                ib.itemRemoved(item);
                collection.removeItem(item);
                // commit every 50 items to keep the transaction small
                if (i%50 == 0) {
                    context.commit();
                    i=0;
                }
            }
            // Reset the harvest bookkeeping so a fresh harvest starts clean
            HarvestedCollection hc = HarvestedCollection.find(context, collection.getID());
            if (hc != null) {
                hc.setHarvestResult(null,"");
                hc.setHarvestStatus(HarvestedCollection.STATUS_READY);
                hc.setHarvestStartTime(null);
                hc.update();
            }
            context.restoreAuthSystemState();
            context.commit();
        }
        catch (Exception e) {
            System.out.println("Changes could not be committed");
            e.printStackTrace();
            System.exit(1);
        }
        finally {
            context.restoreAuthSystemState();
        }
    }

    /**
     * Run a single harvest cycle on the specified collection under the authorization of the supplied EPerson.
     */
    private void runHarvest(String collectionID, String email) {
        System.out.println("Running: a harvest cycle on " + collectionID);
        System.out.print("Initializing the harvester... ");
        OAIHarvester harvester = null;
        try {
            Collection collection = resolveCollection(collectionID);
            HarvestedCollection hc = HarvestedCollection.find(context, collection.getID());
            harvester = new OAIHarvester(context, collection, hc);
            System.out.println("success. ");
        }
        catch (HarvestingException hex) {
            System.out.print("failed. ");
            System.out.println(hex.getMessage());
            throw new IllegalStateException("Unable to harvest", hex);
        } catch (SQLException se) {
            System.out.print("failed. ");
            System.out.println(se.getMessage());
            throw new IllegalStateException("Unable to access database", se);
        }
        try {
            // Harvest will not work for an anonymous user
            EPerson eperson = EPerson.findByEmail(context, email);
            System.out.println("Harvest started... ");
            context.setCurrentUser(eperson);
            harvester.runHarvest();
            context.complete();
        }
        catch (SQLException e) {
            throw new IllegalStateException("Failed to run harvester", e);
        }
        catch (AuthorizeException e) {
            throw new IllegalStateException("Failed to run harvester", e);
        }
        catch (IOException e) {
            throw new IllegalStateException("Failed to run harvester", e);
        }
        System.out.println("Harvest complete. ");
    }

    /**
     * Resets harvest_status and harvest_start_time flags for all collections that have a row in the harvested_collections table.
     */
    private static void resetHarvesting() {
        System.out.print("Resetting harvest status flag on all collections... ");
        try
        {
            List<Integer> cids = HarvestedCollection.findAll(context);
            for (Integer cid : cids)
            {
                HarvestedCollection hc = HarvestedCollection.find(context, cid);
                hc.setHarvestStartTime(null);
                hc.setHarvestStatus(HarvestedCollection.STATUS_READY);
                hc.update();
            }
            context.commit();
            System.out.println("success. ");
        }
        catch (Exception ex) {
            System.out.println("failed. ");
            ex.printStackTrace();
        }
    }

    /**
     * Starts up the harvest scheduler. Terminating this process will stop the scheduler.
     */
    private static void startHarvester()
    {
        try
        {
            System.out.print("Starting harvest loop... ");
            OAIHarvester.startNewScheduler();
            System.out.println("running. ");
        }
        catch (Exception ex) {
            ex.printStackTrace();
        }
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.itemexport;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.PrintWriter;
import java.sql.SQLException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Date;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Set;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;
import javax.mail.MessagingException;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.PosixParser;
import org.apache.commons.lang.StringUtils;
import org.apache.log4j.Logger;
import org.dspace.content.Bitstream;
import org.dspace.content.Bundle;
import org.dspace.content.Collection;
import org.dspace.content.Community;
import org.dspace.content.DCValue;
import org.dspace.content.DSpaceObject;
import org.dspace.content.Item;
import org.dspace.content.ItemIterator;
import org.dspace.content.MetadataSchema;
import org.dspace.core.ConfigurationManager;
import org.dspace.core.Constants;
import org.dspace.core.Context;
import org.dspace.core.I18nUtil;
import org.dspace.core.LogManager;
import org.dspace.core.Utils;
import org.dspace.core.Email;
import org.dspace.eperson.EPerson;
import org.dspace.handle.HandleManager;
/**
* Item exporter to create simple AIPs for DSpace content. Currently exports
* individual items, or entire collections. For instructions on use, see
* printUsage() method.
* <P>
* ItemExport creates the simple AIP package that the importer also uses. It
* consists of:
* <P>
* /exportdir/42/ (one directory per item) / dublin_core.xml - qualified dublin
* core in RDF schema / contents - text file, listing one file per line / file1
* - files contained in the item / file2 / ...
* <P>
* issues -doesn't handle special characters in metadata (needs to turn &'s into
* &, etc.)
* <P>
* Modified by David Little, UCSD Libraries 12/21/04 to allow the registration
* of files (bitstreams) into DSpace.
*
* @author David Little
* @author Jay Paz
*/
public class ItemExport
{
private static final int SUBDIR_LIMIT = 0;
/**
* used for export download
*/
public static final String COMPRESSED_EXPORT_MIME_TYPE = "application/zip";
/** log4j logger */
private static Logger log = Logger.getLogger(ItemExport.class);
/*
*
*/
public static void main(String[] argv) throws Exception
{
// create an options object and populate it
CommandLineParser parser = new PosixParser();
Options options = new Options();
options.addOption("t", "type", true, "type: COLLECTION or ITEM");
options.addOption("i", "id", true, "ID or handle of thing to export");
options.addOption("d", "dest", true,
"destination where you want items to go");
options.addOption("m", "migrate", false, "export for migration (remove handle and metadata that will be re-created in new system)");
options.addOption("n", "number", true,
"sequence number to begin exporting items with");
options.addOption("z", "zip", true, "export as zip file (specify filename e.g. export.zip)");
options.addOption("h", "help", false, "help");
CommandLine line = parser.parse(options, argv);
String typeString = null;
String destDirName = null;
String myIDString = null;
int seqStart = -1;
int myType = -1;
Item myItem = null;
Collection mycollection = null;
if (line.hasOption('h'))
{
HelpFormatter myhelp = new HelpFormatter();
myhelp.printHelp("ItemExport\n", options);
System.out
.println("\nfull collection: ItemExport -t COLLECTION -i ID -d dest -n number");
System.out
.println("singleitem: ItemExport -t ITEM -i ID -d dest -n number");
System.exit(0);
}
if (line.hasOption('t')) // type
{
typeString = line.getOptionValue('t');
if ("ITEM".equals(typeString))
{
myType = Constants.ITEM;
}
else if ("COLLECTION".equals(typeString))
{
myType = Constants.COLLECTION;
}
}
if (line.hasOption('i')) // id
{
myIDString = line.getOptionValue('i');
}
if (line.hasOption('d')) // dest
{
destDirName = line.getOptionValue('d');
}
if (line.hasOption('n')) // number
{
seqStart = Integer.parseInt(line.getOptionValue('n'));
}
boolean migrate = false;
if (line.hasOption('m')) // number
{
migrate = true;
}
boolean zip = false;
String zipFileName = "";
if (line.hasOption('z'))
{
zip = true;
zipFileName = line.getOptionValue('z');
}
// now validate the args
if (myType == -1)
{
System.out
.println("type must be either COLLECTION or ITEM (-h for help)");
System.exit(1);
}
if (destDirName == null)
{
System.out
.println("destination directory must be set (-h for help)");
System.exit(1);
}
if (seqStart == -1)
{
System.out
.println("sequence start number must be set (-h for help)");
System.exit(1);
}
if (myIDString == null)
{
System.out
.println("ID must be set to either a database ID or a handle (-h for help)");
System.exit(1);
}
Context c = new Context();
c.setIgnoreAuthorization(true);
if (myType == Constants.ITEM)
{
// first, is myIDString a handle?
if (myIDString.indexOf('/') != -1)
{
myItem = (Item) HandleManager.resolveToObject(c, myIDString);
if ((myItem == null) || (myItem.getType() != Constants.ITEM))
{
myItem = null;
}
}
else
{
myItem = Item.find(c, Integer.parseInt(myIDString));
}
if (myItem == null)
{
System.out
.println("Error, item cannot be found: " + myIDString);
}
}
else
{
if (myIDString.indexOf('/') != -1)
{
// has a / must be a handle
mycollection = (Collection) HandleManager.resolveToObject(c,
myIDString);
// ensure it's a collection
if ((mycollection == null)
|| (mycollection.getType() != Constants.COLLECTION))
{
mycollection = null;
}
}
else if (myIDString != null)
{
mycollection = Collection.find(c, Integer.parseInt(myIDString));
}
if (mycollection == null)
{
System.out.println("Error, collection cannot be found: "
+ myIDString);
System.exit(1);
}
}
if (zip)
{
ItemIterator items;
if (myItem != null)
{
List<Integer> myItems = new ArrayList<Integer>();
myItems.add(myItem.getID());
items = new ItemIterator(c, myItems);
}
else
{
System.out.println("Exporting from collection: " + myIDString);
items = mycollection.getItems();
}
exportAsZip(c, items, destDirName, zipFileName, seqStart, migrate);
}
else
{
if (myItem != null)
{
// it's only a single item
exportItem(c, myItem, destDirName, seqStart, migrate);
}
else
{
System.out.println("Exporting from collection: " + myIDString);
// it's a collection, so do a bunch of items
ItemIterator i = mycollection.getItems();
try
{
exportItem(c, i, destDirName, seqStart, migrate);
}
finally
{
if (i != null)
{
i.close();
}
}
}
}
c.complete();
}
private static void exportItem(Context c, ItemIterator i,
String destDirName, int seqStart, boolean migrate) throws Exception
{
int mySequenceNumber = seqStart;
int counter = SUBDIR_LIMIT - 1;
int subDirSuffix = 0;
String fullPath = destDirName;
String subdir = "";
File dir;
if (SUBDIR_LIMIT > 0)
{
dir = new File(destDirName);
if (!dir.isDirectory())
{
throw new IOException(destDirName + " is not a directory.");
}
}
System.out.println("Beginning export");
while (i.hasNext())
{
if (SUBDIR_LIMIT > 0 && ++counter == SUBDIR_LIMIT)
{
subdir = Integer.valueOf(subDirSuffix++).toString();
fullPath = destDirName + File.separatorChar + subdir;
counter = 0;
if (!new File(fullPath).mkdirs())
{
throw new IOException("Error, can't make dir " + fullPath);
}
}
System.out.println("Exporting item to " + mySequenceNumber);
exportItem(c, i.next(), fullPath, mySequenceNumber, migrate);
mySequenceNumber++;
}
}
private static void exportItem(Context c, Item myItem, String destDirName,
int seqStart, boolean migrate) throws Exception
{
File destDir = new File(destDirName);
if (destDir.exists())
{
// now create a subdirectory
File itemDir = new File(destDir + "/" + seqStart);
System.out.println("Exporting Item " + myItem.getID() + " to "
+ itemDir);
if (itemDir.exists())
{
throw new Exception("Directory " + destDir + "/" + seqStart
+ " already exists!");
}
if (itemDir.mkdir())
{
// make it this far, now start exporting
writeMetadata(c, myItem, itemDir, migrate);
writeBitstreams(c, myItem, itemDir);
if (!migrate)
{
writeHandle(c, myItem, itemDir);
}
}
else
{
throw new Exception("Error, can't make dir " + itemDir);
}
}
else
{
throw new Exception("Error, directory " + destDirName
+ " doesn't exist!");
}
}
/**
* Discover the different schemas in use and output a seperate metadata XML
* file for each schema.
*
* @param c
* @param i
* @param destDir
* @throws Exception
*/
private static void writeMetadata(Context c, Item i, File destDir, boolean migrate)
throws Exception
{
Set<String> schemas = new HashSet<String>();
DCValue[] dcValues = i.getMetadata(Item.ANY, Item.ANY, Item.ANY, Item.ANY);
for (DCValue dcValue : dcValues)
{
schemas.add(dcValue.schema);
}
// Save each of the schemas into it's own metadata file
for (String schema : schemas)
{
writeMetadata(c, schema, i, destDir, migrate);
}
}
// output the item's dublin core into the item directory
private static void writeMetadata(Context c, String schema, Item i,
File destDir, boolean migrate) throws Exception
{
String filename;
if (schema.equals(MetadataSchema.DC_SCHEMA))
{
filename = "dublin_core.xml";
}
else
{
filename = "metadata_" + schema + ".xml";
}
File outFile = new File(destDir, filename);
System.out.println("Attempting to create file " + outFile);
if (outFile.createNewFile())
{
BufferedOutputStream out = new BufferedOutputStream(
new FileOutputStream(outFile));
DCValue[] dcorevalues = i.getMetadata(schema, Item.ANY, Item.ANY,
Item.ANY);
// XML preamble
byte[] utf8 = "<?xml version=\"1.0\" encoding=\"utf-8\" standalone=\"no\"?>\n"
.getBytes("UTF-8");
out.write(utf8, 0, utf8.length);
String dcTag = "<dublin_core schema=\"" + schema + "\">\n";
utf8 = dcTag.getBytes("UTF-8");
out.write(utf8, 0, utf8.length);
String dateIssued = null;
String dateAccessioned = null;
for (DCValue dcv : dcorevalues)
{
String qualifier = dcv.qualifier;
if (qualifier == null)
{
qualifier = "none";
}
String language = dcv.language;
if (language != null)
{
language = " language=\"" + language + "\"";
}
else
{
language = "";
}
utf8 = (" <dcvalue element=\"" + dcv.element + "\" "
+ "qualifier=\"" + qualifier + "\""
+ language + ">"
+ Utils.addEntities(dcv.value) + "</dcvalue>\n")
.getBytes("UTF-8");
if ((!migrate) ||
(migrate && !(
("date".equals(dcv.element) && "issued".equals(qualifier)) ||
("date".equals(dcv.element) && "accessioned".equals(qualifier)) ||
("date".equals(dcv.element) && "available".equals(qualifier)) ||
("identifier".equals(dcv.element) && "uri".equals(qualifier) &&
(dcv.value != null && dcv.value.startsWith("http://hdl.handle.net/" +
HandleManager.getPrefix() + "/"))) ||
("description".equals(dcv.element) && "provenance".equals(qualifier)) ||
("format".equals(dcv.element) && "extent".equals(qualifier)) ||
("format".equals(dcv.element) && "mimetype".equals(qualifier)))))
{
out.write(utf8, 0, utf8.length);
}
// Store the date issued and accession to see if they are different
// because we need to keep date.issued if they are, when migrating
if (("date".equals(dcv.element) && "issued".equals(qualifier)))
{
dateIssued = dcv.value;
}
if (("date".equals(dcv.element) && "accessioned".equals(qualifier)))
{
dateAccessioned = dcv.value;
}
}
// When migrating, only keep date.issued if it is different to date.accessioned
if ((migrate) &&
(dateIssued != null) &&
(dateAccessioned != null) &&
(!dateIssued.equals(dateAccessioned)))
{
utf8 = (" <dcvalue element=\"date\" "
+ "qualifier=\"issued\">"
+ Utils.addEntities(dateIssued) + "</dcvalue>\n")
.getBytes("UTF-8");
out.write(utf8, 0, utf8.length);
}
utf8 = "</dublin_core>\n".getBytes("UTF-8");
out.write(utf8, 0, utf8.length);
out.close();
}
else
{
throw new Exception("Cannot create dublin_core.xml in " + destDir);
}
}
// create the file 'handle' which contains the handle assigned to the item
private static void writeHandle(Context c, Item i, File destDir)
throws Exception
{
if (i.getHandle() == null)
{
return;
}
String filename = "handle";
File outFile = new File(destDir, filename);
if (outFile.createNewFile())
{
PrintWriter out = new PrintWriter(new FileWriter(outFile));
out.println(i.getHandle());
// close the contents file
out.close();
}
else
{
throw new Exception("Cannot create file " + filename + " in "
+ destDir);
}
}
    /**
     * Create both the bitstreams and the contents file. Any bitstreams that
     * were originally registered will be marked in the contents file as such.
     * However, the export directory will contain actual copies of the content
     * files being exported.
     *
     * @param c
     *            the DSpace context
     * @param i
     *            the item being exported
     * @param destDir
     *            the item's export directory
     * @throws Exception
     *             if there is any problem writing to the export directory
     */
    private static void writeBitstreams(Context c, Item i, File destDir)
            throws Exception
    {
        // the manifest listing every exported bitstream, one line each
        File outFile = new File(destDir, "contents");

        if (outFile.createNewFile())
        {
            // NOTE(review): 'out' (and 'is' below) are not closed if an
            // exception is thrown mid-loop — consider try/finally.
            PrintWriter out = new PrintWriter(new FileWriter(outFile));

            Bundle[] bundles = i.getBundles();

            for (int j = 0; j < bundles.length; j++)
            {
                // bundles can have multiple bitstreams now...
                Bitstream[] bitstreams = bundles[j].getBitstreams();

                String bundleName = bundles[j].getName();

                for (int k = 0; k < bitstreams.length; k++)
                {
                    Bitstream b = bitstreams[k];

                    String myName = b.getName();
                    String oldName = myName;

                    // optional tab-separated description field for the manifest
                    String description = b.getDescription();
                    if (!StringUtils.isEmpty(description))
                    {
                        description = "\tdescription:" + description;
                    } else
                    {
                        description = "";
                    }

                    // mark the bundle's primary bitstream in the manifest
                    String primary = "";
                    if (bundles[j].getPrimaryBitstreamID() == b.getID()) {
                        primary = "\tprimary:true ";
                    }

                    int myPrefix = 1; // only used with name conflict

                    InputStream is = b.retrieve();

                    boolean isDone = false; // done when bitstream is finally
                    // written

                    while (!isDone)
                    {
                        // a bitstream name containing path separators maps to
                        // a sub-directory tree under the export directory
                        if (myName.contains(File.separator))
                        {
                            String dirs = myName.substring(0, myName
                                    .lastIndexOf(File.separator));
                            File fdirs = new File(destDir + File.separator
                                    + dirs);
                            if (!fdirs.exists() && !fdirs.mkdirs())
                            {
                                log.error("Unable to create destination directory");
                            }
                        }

                        File fout = new File(destDir, myName);

                        // createNewFile() is atomic: false means the name is
                        // taken and we retry below with a numeric prefix
                        if (fout.createNewFile())
                        {
                            FileOutputStream fos = new FileOutputStream(fout);
                            Utils.bufferedCopy(is, fos);
                            // close streams
                            is.close();
                            fos.close();

                            // write the manifest file entry
                            // (registered bitstreams get -r -s <store> -f flags)
                            if (b.isRegisteredBitstream())
                            {
                                out.println("-r -s " + b.getStoreNumber()
                                        + " -f " + myName +
                                        "\tbundle:" + bundleName +
                                        primary + description);
                            }
                            else
                            {
                                out.println(myName + "\tbundle:" + bundleName +
                                        primary + description);
                            }

                            isDone = true;
                        }
                        else
                        {
                            myName = myPrefix + "_" + oldName; // keep
                            // appending
                            // numbers to the
                            // filename until
                            // unique
                            myPrefix++;
                        }
                    }
                }
            }

            // close the contents file
            out.close();
        }
        else
        {
            throw new Exception("Cannot create contents in " + destDir);
        }
    }
/**
* Method to perform an export and save it as a zip file.
*
* @param context The DSpace Context
* @param items The items to export
* @param destDirName The directory to save the export in
* @param zipFileName The name to save the zip file as
* @param seqStart The first number in the sequence
* @param migrate Whether to use the migrate option or not
* @throws Exception
*/
public static void exportAsZip(Context context, ItemIterator items,
String destDirName, String zipFileName,
int seqStart, boolean migrate) throws Exception
{
String workDir = getExportWorkDirectory() +
System.getProperty("file.separator") +
zipFileName;
File wkDir = new File(workDir);
if (!wkDir.exists() && !wkDir.mkdirs())
{
log.error("Unable to create working direcory");
}
File dnDir = new File(destDirName);
if (!dnDir.exists() && !dnDir.mkdirs())
{
log.error("Unable to create destination directory");
}
// export the items using normal export method
exportItem(context, items, workDir, seqStart, migrate);
// now zip up the export directory created above
zip(workDir, destDirName + System.getProperty("file.separator") + zipFileName);
}
/**
* Convenience methot to create export a single Community, Collection, or
* Item
*
* @param dso
* - the dspace object to export
* @param context
* - the dspace context
* @throws Exception
*/
public static void createDownloadableExport(DSpaceObject dso,
Context context, boolean migrate) throws Exception
{
EPerson eperson = context.getCurrentUser();
ArrayList<DSpaceObject> list = new ArrayList<DSpaceObject>(1);
list.add(dso);
processDownloadableExport(list, context, eperson == null ? null
: eperson.getEmail(), migrate);
}
/**
* Convenience method to export a List of dspace objects (Community,
* Collection or Item)
*
* @param dsObjects
* - List containing dspace objects
* @param context
* - the dspace context
* @throws Exception
*/
public static void createDownloadableExport(List<DSpaceObject> dsObjects,
Context context, boolean migrate) throws Exception
{
EPerson eperson = context.getCurrentUser();
processDownloadableExport(dsObjects, context, eperson == null ? null
: eperson.getEmail(), migrate);
}
/**
* Convenience methot to create export a single Community, Collection, or
* Item
*
* @param dso
* - the dspace object to export
* @param context
* - the dspace context
* @param additionalEmail
* - cc email to use
* @throws Exception
*/
public static void createDownloadableExport(DSpaceObject dso,
Context context, String additionalEmail, boolean migrate) throws Exception
{
ArrayList<DSpaceObject> list = new ArrayList<DSpaceObject>(1);
list.add(dso);
processDownloadableExport(list, context, additionalEmail, migrate);
}
/**
* Convenience method to export a List of dspace objects (Community,
* Collection or Item)
*
* @param dsObjects
* - List containing dspace objects
* @param context
* - the dspace context
* @param additionalEmail
* - cc email to use
* @throws Exception
*/
public static void createDownloadableExport(List<DSpaceObject> dsObjects,
Context context, String additionalEmail, boolean migrate) throws Exception
{
processDownloadableExport(dsObjects, context, additionalEmail, migrate);
}
    /**
     * Does the work creating a List with all the Items in the Community or
     * Collection. It then kicks off a new Thread to export the items, zip the
     * export directory and send a confirmation email.
     *
     * @param dsObjects
     *            - List of dspace objects to process
     * @param context
     *            - the dspace context
     * @param additionalEmail
     *            - email address to cc in addition to the current user email
     * @throws Exception
     *             if the export is larger than the configured maximum, or the
     *             item enumeration fails
     */
    private static void processDownloadableExport(List<DSpaceObject> dsObjects,
            Context context, final String additionalEmail, boolean toMigrate) throws Exception
    {
        // captured by the anonymous thread below, hence final
        final EPerson eperson = context.getCurrentUser();
        final boolean migrate = toMigrate;

        // before we create a new export archive lets delete the 'expired'
        // archives
        //deleteOldExportArchives(eperson.getID());
        deleteOldExportArchives();

        // keep track of the commulative size of all bitstreams in each of the
        // items
        // it will be checked against the config file entry
        double size = 0;
        final ArrayList<Integer> items = new ArrayList<Integer>();
        for (DSpaceObject dso : dsObjects)
        {
            if (dso.getType() == Constants.COMMUNITY)
            {
                Community community = (Community) dso;
                // get all the collections in the community
                Collection[] collections = community.getCollections();
                for (Collection collection : collections)
                {
                    // get all the items in each collection
                    ItemIterator iitems = collection.getItems();
                    try
                    {
                        while (iitems.hasNext())
                        {
                            Item item = iitems.next();
                            // get all the bundles in the item
                            Bundle[] bundles = item.getBundles();
                            for (Bundle bundle : bundles)
                            {
                                // get all the bitstreams in each bundle
                                Bitstream[] bitstreams = bundle.getBitstreams();
                                for (Bitstream bit : bitstreams)
                                {
                                    // add up the size
                                    size += bit.getSize();
                                }
                            }
                            items.add(item.getID());
                        }
                    }
                    finally
                    {
                        // always release the iterator's database resources
                        if (iitems != null)
                        {
                            iitems.close();
                        }
                    }
                }
            }
            else if (dso.getType() == Constants.COLLECTION)
            {
                Collection collection = (Collection) dso;
                // get all the items in the collection
                ItemIterator iitems = collection.getItems();
                try
                {
                    while (iitems.hasNext())
                    {
                        Item item = iitems.next();
                        // get all thebundles in the item
                        Bundle[] bundles = item.getBundles();
                        for (Bundle bundle : bundles)
                        {
                            // get all the bitstreams in the bundle
                            Bitstream[] bitstreams = bundle.getBitstreams();
                            for (Bitstream bit : bitstreams)
                            {
                                // add up the size
                                size += bit.getSize();
                            }
                        }
                        items.add(item.getID());
                    }
                }
                finally
                {
                    if (iitems != null)
                    {
                        iitems.close();
                    }
                }
            }
            else if (dso.getType() == Constants.ITEM)
            {
                Item item = (Item) dso;
                // get all the bundles in the item
                Bundle[] bundles = item.getBundles();
                for (Bundle bundle : bundles)
                {
                    // get all the bitstreams in the bundle
                    Bitstream[] bitstreams = bundle.getBitstreams();
                    for (Bitstream bit : bitstreams)
                    {
                        // add up the size
                        size += bit.getSize();
                    }
                }
                items.add(item.getID());
            }
            else
            {
                // nothing to do just ignore this type of DSPaceObject
            }
        }

        // check the size of all the bitstreams against the configuration file
        // entry if it exists
        String megaBytes = ConfigurationManager
                .getProperty("org.dspace.app.itemexport.max.size");
        if (megaBytes != null)
        {
            float maxSize = 0;
            try
            {
                maxSize = Float.parseFloat(megaBytes);
            }
            catch (Exception e)
            {
                // ignore...configuration entry may not be present
            }

            if (maxSize > 0 && maxSize < (size / 1048576.00))
            { // a megabyte
                throw new ItemExportException(ItemExportException.EXPORT_TOO_LARGE,
                        "The overall size of this export is too large. Please contact your administrator for more information.");
            }
        }

        // if we have any items to process then kick off annonymous thread
        if (items.size() > 0)
        {
            Thread go = new Thread()
            {
                public void run()
                {
                    Context context = null;
                    ItemIterator iitems = null;
                    try
                    {
                        // create a new dspace context
                        // (the caller's context cannot be used across threads)
                        context = new Context();
                        // ignore auths
                        context.setIgnoreAuthorization(true);
                        iitems = new ItemIterator(context, items);

                        String fileName = assembleFileName("item", eperson,
                                new Date());
                        String workDir = getExportWorkDirectory()
                                + System.getProperty("file.separator")
                                + fileName;
                        String downloadDir = getExportDownloadDirectory(eperson
                                .getID());

                        File wkDir = new File(workDir);
                        if (!wkDir.exists() && !wkDir.mkdirs())
                        {
                            log.error("Unable to create working directory");
                        }

                        File dnDir = new File(downloadDir);
                        if (!dnDir.exists() && !dnDir.mkdirs())
                        {
                            log.error("Unable to create download directory");
                        }

                        // export the items using normal export method
                        exportItem(context, iitems, workDir, 1, migrate);

                        // now zip up the export directory created above
                        zip(workDir, downloadDir
                                + System.getProperty("file.separator")
                                + fileName + ".zip");
                        // email message letting user know the file is ready for
                        // download
                        emailSuccessMessage(context, eperson, fileName + ".zip");
                        // return to enforcing auths
                        context.setIgnoreAuthorization(false);
                    }
                    catch (Exception e1)
                    {
                        // notify the user by email; failures here are
                        // deliberately swallowed since we rethrow below
                        try
                        {
                            emailErrorMessage(eperson, e1.getMessage());
                        }
                        catch (Exception e)
                        {
                            // wont throw here
                        }
                        throw new IllegalStateException(e1);
                    }
                    finally
                    {
                        if (iitems != null)
                        {
                            iitems.close();
                        }

                        // Make sure the database connection gets closed in all conditions.
                        try {
                            context.complete();
                        } catch (SQLException sqle) {
                            context.abort();
                        }
                    }
                }

            };

            // NOTE(review): isDaemon() is a getter with no side effect —
            // setDaemon(true) was probably intended; confirm before changing.
            go.isDaemon();
            go.start();
        }
    }
/**
* Create a file name based on the date and eperson
*
* @param eperson
* - eperson who requested export and will be able to download it
* @param date
* - the date the export process was created
* @return String representing the file name in the form of
* 'export_yyy_MMM_dd_count_epersonID'
* @throws Exception
*/
public static String assembleFileName(String type, EPerson eperson,
Date date) throws Exception
{
// to format the date
SimpleDateFormat sdf = new SimpleDateFormat("yyyy_MMM_dd");
String downloadDir = getExportDownloadDirectory(eperson.getID());
// used to avoid name collision
int count = 1;
boolean exists = true;
String fileName = null;
while (exists)
{
fileName = type + "_export_" + sdf.format(date) + "_" + count + "_"
+ eperson.getID();
exists = new File(downloadDir
+ System.getProperty("file.separator") + fileName + ".zip")
.exists();
count++;
}
return fileName;
}
/**
* Use config file entry for org.dspace.app.itemexport.download.dir and id
* of the eperson to create a download directory name
*
* @param ePersonID
* - id of the eperson who requested export archive
* @return String representing a directory in the form of
* org.dspace.app.itemexport.download.dir/epersonID
* @throws Exception
*/
public static String getExportDownloadDirectory(int ePersonID)
throws Exception
{
String downloadDir = ConfigurationManager
.getProperty("org.dspace.app.itemexport.download.dir");
if (downloadDir == null)
{
throw new Exception(
"A dspace.cfg entry for 'org.dspace.app.itemexport.download.dir' does not exist.");
}
return downloadDir + System.getProperty("file.separator") + ePersonID;
}
/**
* Returns config file entry for org.dspace.app.itemexport.work.dir
*
* @return String representing config file entry for
* org.dspace.app.itemexport.work.dir
* @throws Exception
*/
public static String getExportWorkDirectory() throws Exception
{
String exportDir = ConfigurationManager
.getProperty("org.dspace.app.itemexport.work.dir");
if (exportDir == null)
{
throw new Exception(
"A dspace.cfg entry for 'org.dspace.app.itemexport.work.dir' does not exist.");
}
return exportDir;
}
/**
* Used to read the export archived. Inteded for download.
*
* @param fileName
* the name of the file to download
* @param eperson
* the eperson requesting the download
* @return an input stream of the file to be downloaded
* @throws Exception
*/
public static InputStream getExportDownloadInputStream(String fileName,
EPerson eperson) throws Exception
{
File file = new File(getExportDownloadDirectory(eperson.getID())
+ System.getProperty("file.separator") + fileName);
if (file.exists())
{
return new FileInputStream(file);
}
else
{
return null;
}
}
/**
* Get the file size of the export archive represented by the file name
*
* @param fileName
* name of the file to get the size
* @return
* @throws Exception
*/
public static long getExportFileSize(String fileName) throws Exception
{
String strID = fileName.substring(fileName.lastIndexOf('_') + 1,
fileName.lastIndexOf('.'));
File file = new File(
getExportDownloadDirectory(Integer.parseInt(strID))
+ System.getProperty("file.separator") + fileName);
if (!file.exists() || !file.isFile())
{
throw new FileNotFoundException("The file "
+ getExportDownloadDirectory(Integer.parseInt(strID))
+ System.getProperty("file.separator") + fileName
+ " does not exist.");
}
return file.length();
}
public static long getExportFileLastModified(String fileName)
throws Exception
{
String strID = fileName.substring(fileName.lastIndexOf('_') + 1,
fileName.lastIndexOf('.'));
File file = new File(
getExportDownloadDirectory(Integer.parseInt(strID))
+ System.getProperty("file.separator") + fileName);
if (!file.exists() || !file.isFile())
{
throw new FileNotFoundException("The file "
+ getExportDownloadDirectory(Integer.parseInt(strID))
+ System.getProperty("file.separator") + fileName
+ " does not exist.");
}
return file.lastModified();
}
/**
* The file name of the export archive contains the eperson id of the person
* who created it When requested for download this method can check if the
* person requesting it is the same one that created it
*
* @param context
* dspace context
* @param fileName
* the file name to check auths for
* @return true if it is the same person false otherwise
*/
public static boolean canDownload(Context context, String fileName)
{
EPerson eperson = context.getCurrentUser();
if (eperson == null)
{
return false;
}
String strID = fileName.substring(fileName.lastIndexOf('_') + 1,
fileName.lastIndexOf('.'));
try
{
if (Integer.parseInt(strID) == eperson.getID())
{
return true;
}
}
catch (Exception e)
{
return false;
}
return false;
}
/**
* Reads the download directory for the eperson to see if any export
* archives are available
*
* @param eperson
* @return a list of file names representing export archives that have been
* processed
* @throws Exception
*/
public static List<String> getExportsAvailable(EPerson eperson)
throws Exception
{
File downloadDir = new File(getExportDownloadDirectory(eperson.getID()));
if (!downloadDir.exists() || !downloadDir.isDirectory())
{
return null;
}
List<String> fileNames = new ArrayList<String>();
for (String fileName : downloadDir.list())
{
if (fileName.contains("export") && fileName.endsWith(".zip"))
{
fileNames.add(fileName);
}
}
if (fileNames.size() > 0)
{
return fileNames;
}
return null;
}
/**
* A clean up method that is ran before a new export archive is created. It
* uses the config file entry 'org.dspace.app.itemexport.life.span.hours' to
* determine if the current exports are too old and need pruging
*
* @param epersonID
* - the id of the eperson to clean up
* @throws Exception
*/
public static void deleteOldExportArchives(int epersonID) throws Exception
{
int hours = ConfigurationManager
.getIntProperty("org.dspace.app.itemexport.life.span.hours");
Calendar now = Calendar.getInstance();
now.setTime(new Date());
now.add(Calendar.HOUR, (-hours));
File downloadDir = new File(getExportDownloadDirectory(epersonID));
if (downloadDir.exists())
{
File[] files = downloadDir.listFiles();
for (File file : files)
{
if (file.lastModified() < now.getTimeInMillis())
{
if (!file.delete())
{
log.error("Unable to delete export file");
}
}
}
}
}
/**
* A clean up method that is ran before a new export archive is created. It
* uses the config file entry 'org.dspace.app.itemexport.life.span.hours' to
* determine if the current exports are too old and need purgeing
* Removes all old exports, not just those for the person doing the export.
*
* @throws Exception
*/
public static void deleteOldExportArchives() throws Exception
{
int hours = ConfigurationManager.getIntProperty("org.dspace.app.itemexport.life.span.hours");
Calendar now = Calendar.getInstance();
now.setTime(new Date());
now.add(Calendar.HOUR, (-hours));
File downloadDir = new File(ConfigurationManager.getProperty("org.dspace.app.itemexport.download.dir"));
if (downloadDir.exists())
{
// Get a list of all the sub-directories, potentially one for each ePerson.
File[] dirs = downloadDir.listFiles();
for (File dir : dirs)
{
// For each sub-directory delete any old files.
File[] files = dir.listFiles();
for (File file : files)
{
if (file.lastModified() < now.getTimeInMillis())
{
if (!file.delete())
{
log.error("Unable to delete old files");
}
}
}
// If the directory is now empty then we delete it too.
if (dir.listFiles().length == 0)
{
if (!dir.delete())
{
log.error("Unable to delete directory");
}
}
}
}
}
/**
* Since the archive is created in a new thread we are unable to communicate
* with calling method about success or failure. We accomplis this
* communication with email instead. Send a success email once the export
* archive is complete and ready for download
*
* @param context
* - the current Context
* @param eperson
* - eperson to send the email to
* @param fileName
* - the file name to be downloaded. It is added to the url in
* the email
* @throws MessagingException
*/
public static void emailSuccessMessage(Context context, EPerson eperson,
String fileName) throws MessagingException
{
try
{
Locale supportedLocale = I18nUtil.getEPersonLocale(eperson);
Email email = ConfigurationManager.getEmail(I18nUtil.getEmailFilename(supportedLocale, "export_success"));
email.addRecipient(eperson.getEmail());
email.addArgument(ConfigurationManager.getProperty("dspace.url") + "/exportdownload/" + fileName);
email.addArgument(ConfigurationManager.getProperty("org.dspace.app.itemexport.life.span.hours"));
email.send();
}
catch (Exception e)
{
log.warn(LogManager.getHeader(context, "emailSuccessMessage", "cannot notify user of export"), e);
}
}
/**
* Since the archive is created in a new thread we are unable to communicate
* with calling method about success or failure. We accomplis this
* communication with email instead. Send an error email if the export
* archive fails
*
* @param eperson
* - EPerson to send the error message to
* @param error
* - the error message
* @throws MessagingException
*/
public static void emailErrorMessage(EPerson eperson, String error)
throws MessagingException
{
log.warn("An error occured during item export, the user will be notified. " + error);
try
{
Locale supportedLocale = I18nUtil.getEPersonLocale(eperson);
Email email = ConfigurationManager.getEmail(I18nUtil.getEmailFilename(supportedLocale, "export_error"));
email.addRecipient(eperson.getEmail());
email.addArgument(error);
email.addArgument(ConfigurationManager.getProperty("dspace.url") + "/feedback");
email.send();
}
catch (Exception e)
{
log.warn("error during item export error notification", e);
}
}
public static void zip(String strSource, String target) throws Exception
{
ZipOutputStream cpZipOutputStream = null;
String tempFileName = target + "_tmp";
try
{
File cpFile = new File(strSource);
if (!cpFile.isFile() && !cpFile.isDirectory())
{
return;
}
File targetFile = new File(tempFileName);
if (!targetFile.createNewFile())
{
log.warn("Target file already exists: " + targetFile.getName());
}
FileOutputStream fos = new FileOutputStream(tempFileName);
cpZipOutputStream = new ZipOutputStream(fos);
cpZipOutputStream.setLevel(9);
zipFiles(cpFile, strSource, tempFileName, cpZipOutputStream);
cpZipOutputStream.finish();
cpZipOutputStream.close();
cpZipOutputStream = null;
// Fix issue on Windows with stale file handles open before trying to delete them
System.gc();
deleteDirectory(cpFile);
if (!targetFile.renameTo(new File(target)))
{
log.error("Unable to rename file");
}
}
finally
{
if (cpZipOutputStream != null)
{
cpZipOutputStream.close();
}
}
}
    /**
     * Recursively add cpFile (a file or directory tree) to the open zip
     * stream. Entry names are the paths relative to strSource; the in-progress
     * archive itself (strTarget) is skipped so it is never zipped into itself.
     *
     * @param cpFile            file or directory currently being added
     * @param strSource         root directory the entry names are relative to
     * @param strTarget         path of the archive being written (excluded)
     * @param cpZipOutputStream the open archive stream
     * @throws Exception if reading a file or writing an entry fails
     */
    private static void zipFiles(File cpFile, String strSource,
            String strTarget, ZipOutputStream cpZipOutputStream)
            throws Exception
    {
        int byteCount;
        final int DATA_BLOCK_SIZE = 2048;
        FileInputStream cpFileInputStream = null;
        if (cpFile.isDirectory())
        {
            // recurse into each child; directories themselves get no entry
            File[] fList = cpFile.listFiles();
            for (int i = 0; i < fList.length; i++)
            {
                zipFiles(fList[i], strSource, strTarget, cpZipOutputStream);
            }
        }
        else
        {
            try
            {
                // never add the archive-in-progress to itself
                if (cpFile.getAbsolutePath().equalsIgnoreCase(strTarget))
                {
                    return;
                }
                // entry name = path relative to the source root
                String strAbsPath = cpFile.getPath();
                String strZipEntryName = strAbsPath.substring(strSource
                        .length() + 1, strAbsPath.length());

                // byte[] b = new byte[ (int)(cpFile.length()) ];
                cpFileInputStream = new FileInputStream(cpFile);

                ZipEntry cpZipEntry = new ZipEntry(strZipEntryName);
                cpZipOutputStream.putNextEntry(cpZipEntry);

                // stream the file into the entry in fixed-size chunks
                byte[] b = new byte[DATA_BLOCK_SIZE];
                while ((byteCount = cpFileInputStream.read(b, 0,
                        DATA_BLOCK_SIZE)) != -1)
                {
                    cpZipOutputStream.write(b, 0, byteCount);
                }

                // cpZipOutputStream.write(b, 0, (int)cpFile.length());
            }
            finally
            {
                if (cpFileInputStream != null)
                {
                    cpFileInputStream.close();
                }
                // safe when no entry was opened (closeEntry is a no-op then)
                cpZipOutputStream.closeEntry();
            }
        }
    }
private static boolean deleteDirectory(File path)
{
if (path.exists())
{
File[] files = path.listFiles();
for (int i = 0; i < files.length; i++)
{
if (files[i].isDirectory())
{
deleteDirectory(files[i]);
}
else
{
if (!files[i].delete())
{
log.error("Unable to delete file: " + files[i].getName());
}
}
}
}
boolean pathDeleted = path.delete();
return (pathDeleted);
}
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.itemexport;
/**
* An exception that can be thrown when error occur during item export
*/
/**
 * An exception that can be thrown when errors occur during item export.
 * Carries an integer reason code alongside the message.
 */
public class ItemExportException extends Exception
{
    /** Reason code: the requested export exceeds the configured size limit. */
    public static final int EXPORT_TOO_LARGE = 0;

    // one of the reason codes declared above
    private final int reason;

    /**
     * @param r       reason code (e.g. {@link #EXPORT_TOO_LARGE})
     * @param message human-readable description
     */
    public ItemExportException(int r, String message)
    {
        super(message);
        this.reason = r;
    }

    /** @return the reason code supplied at construction */
    public int getReason()
    {
        return reason;
    }
}
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.itemupdate;
import java.text.ParseException;
import java.util.regex.*;
import org.dspace.core.Constants;
/**
 * Holds the elements of a line in the Contents Entry file
 *
 * Based on private methods in ItemImport
 *
 * Lacking a spec or full documentation for the file format,
 * it looks from the source code that the ordering or elements is not fixed
 *
 * e.g.:
 * 48217870-MIT.pdf\tbundle: bundlename\tpermissions: -r 'MIT Users'\tdescription: Full printable version (MIT only)
 * permissions: -[r|w] ['group name']
 * description: <the description of the file>
 *
 *
 */
public class ContentsEntry
{
    // Header tokens that may prefix the optional tab-separated fields
    public static final String HDR_BUNDLE = "bundle:";
    public static final String HDR_PERMISSIONS = "permissions:";
    public static final String HDR_DESCRIPTION = "description:";

    // e.g. "-r 'MIT Users'": group 1 is the action letter, group 2 the group name
    public static final Pattern permissionsPattern = Pattern.compile("-([rw])\\s*'?([^']+)'?");

    final String filename;
    final String bundlename;
    final String permissionsGroupName;
    final int permissionsActionId;
    final String description;

    private ContentsEntry(String filename,
                          String bundlename,
                          int permissionsActionId,
                          String permissionsGroupName,
                          String description)
    {
        this.filename = filename;
        this.bundlename = bundlename;
        this.permissionsActionId = permissionsActionId;
        this.permissionsGroupName = permissionsGroupName;
        this.description = description;
    }

    /**
     * Factory method parses a line from the Contents Entry file
     *
     * @param line a tab-separated contents line; the bitstream name is always first
     * @return the parsed ContentsEntry object
     * @throws ParseException if a field carries an unrecognized header
     */
    public static ContentsEntry parse(String line)
            throws ParseException
    {
        String[] tokens = line.split("\t");
        ItemUpdate.pr("ce line split: " + tokens.length);

        // slots: 0 = filename, 1 = bundle name, 2 = raw permissions text, 3 = description
        String[] parts = new String[4];
        parts[0] = tokens[0]; //bitstream name doesn't have header and is always first

        String groupName = null;
        int actionId = -1;

        for (int k = 1; k < tokens.length; k++)
        {
            ItemUpdate.pr("ce " + k + " : " + tokens[k]);
            if (tokens[k].startsWith(HDR_BUNDLE))
            {
                parts[1] = tokens[k].substring(HDR_BUNDLE.length()).trim();
            }
            else if (tokens[k].startsWith(HDR_PERMISSIONS))
            {
                parts[2] = tokens[k].substring(HDR_PERMISSIONS.length()).trim();

                // parse into actionId and group name
                Matcher matcher = permissionsPattern.matcher(parts[2]);
                if (matcher.matches())
                {
                    String action = matcher.group(1);
                    if (action.equals("r"))
                    {
                        actionId = Constants.READ;
                    }
                    else if (action.equals("w"))
                    {
                        actionId = Constants.WRITE;
                    }
                    groupName = matcher.group(2).trim();
                }
            }
            else if (tokens[k].startsWith(HDR_DESCRIPTION))
            {
                parts[3] = tokens[k].substring(HDR_DESCRIPTION.length()).trim();
            }
            else
            {
                throw new ParseException("Unknown text in contents file: " + tokens[k], 0);
            }
        }

        return new ContentsEntry(parts[0], parts[1], actionId, groupName, parts[3]);
    }

    public String toString()
    {
        StringBuilder sb = new StringBuilder(filename);

        if (bundlename != null)
        {
            sb.append(HDR_BUNDLE).append(" ").append(bundlename);
        }

        if (permissionsGroupName != null)
        {
            sb.append(HDR_PERMISSIONS);
            if (permissionsActionId == Constants.READ)
            {
                sb.append(" -r ");
            }
            else if (permissionsActionId == Constants.WRITE)
            {
                sb.append(" -w ");
            }
            sb.append(permissionsGroupName);
        }

        if (description != null)
        {
            sb.append(HDR_DESCRIPTION).append(" ").append(description);
        }

        return sb.toString();
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.itemupdate;
import java.io.File;
import java.io.IOException;
import java.sql.SQLException;
import java.text.ParseException;
import java.util.List;
import org.dspace.authorize.AuthorizeException;
import org.dspace.content.Bitstream;
import org.dspace.content.Bundle;
import org.dspace.content.DCDate;
import org.dspace.content.Item;
import org.dspace.core.Context;
/**
 * Action to delete bitstreams
 *
 * Undo not supported for this UpdateAction
 *
 * Derivatives of the bitstream to be deleted are not also deleted
 *
 */
public class DeleteBitstreamsAction extends UpdateBitstreamsAction
{
    /**
     * Delete bitstream from item
     *
     * Reads bitstream ids from the item's delete_contents file, removes each
     * one from all of its bundles, and (optionally) appends a provenance note.
     *
     * @param context the DSpace context
     * @param itarch the item archive containing the delete_contents file
     * @param isTest if true, only report what would be deleted
     * @param suppressUndo unused here; undo is not supported for deletions
     * @throws IllegalArgumentException
     * @throws ParseException
     * @throws IOException
     * @throws AuthorizeException
     * @throws SQLException
     */
    public void execute(Context context, ItemArchive itarch, boolean isTest,
            boolean suppressUndo) throws IllegalArgumentException, IOException,
            SQLException, AuthorizeException, ParseException
    {
        File f = new File(itarch.getDirectory(), ItemUpdate.DELETE_CONTENTS_FILE);
        if (!f.exists())
        {
            ItemUpdate.pr("Warning: Delete_contents file for item " + itarch.getDirectoryName() + " not found.");
        }
        else
        {
            List<Integer> list = MetadataUtilities.readDeleteContentsFile(f);
            if (list.isEmpty())
            {
                ItemUpdate.pr("Warning: empty delete_contents file for item " + itarch.getDirectoryName() );
            }
            else
            {
                for (int id : list)
                {
                    try
                    {
                        Bitstream bs = Bitstream.find(context, id);
                        if (bs == null)
                        {
                            ItemUpdate.pr("Bitstream not found by id: " + id);
                        }
                        else
                        {
                            Bundle[] bundles = bs.getBundles();
                            for (Bundle b : bundles)
                            {
                                if (isTest)
                                {
                                    ItemUpdate.pr("Delete bitstream with id = " + id);
                                }
                                else
                                {
                                    b.removeBitstream(bs);
                                    ItemUpdate.pr("Deleted bitstream with id = " + id);
                                }
                            }

                            // FIX: guard against an orphaned bitstream (no bundles) --
                            // previously bundles[0] threw ArrayIndexOutOfBoundsException
                            if (alterProvenance && bundles.length > 0)
                            {
                                DtoMetadata dtom = DtoMetadata.create("dc.description.provenance", "en", "");

                                String append = "Bitstream " + bs.getName() + " deleted on " + DCDate.getCurrent() + "; ";
                                Item item = bundles[0].getItems()[0];
                                ItemUpdate.pr("Append provenance with: " + append);

                                if (!isTest)
                                {
                                    MetadataUtilities.appendMetadata(item, dtom, false, append);
                                }
                            }
                        }
                    }
                    catch(SQLException e)
                    {
                        ItemUpdate.pr("Error finding bitstream from id: " + id + " : " + e.toString());
                    }
                }
            }
        }
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.itemupdate;
/**
 * Base class for Bitstream actions
 *
 * Holds the shared flag controlling whether the dc.description.provenance
 * field may be amended when bitstreams are changed.
 */
public abstract class UpdateBitstreamsAction implements UpdateAction {

    /** Whether provenance metadata may be altered; defaults to true. */
    protected boolean alterProvenance = true;

    /**
     * Set variable to indicate that the dc.description.provenance field may
     * be changed as a result of Bitstream changes by ItemUpdate
     *
     * @param alterProvenance true to permit provenance changes
     */
    public void setAlterProvenance(boolean alterProvenance)
    {
        this.alterProvenance = alterProvenance;
    }

    /**
     * @return boolean value to indicate whether the dc.description.provenance field may
     *         be changed as a result of Bitstream changes by ItemUpdate
     */
    public boolean getAlterProvenance()
    {
        return this.alterProvenance;
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.itemupdate;
import java.io.IOException;
import java.util.Properties;
import java.io.InputStream;
import java.io.FileInputStream;
import org.dspace.content.Bitstream;
/**
 * Filter interface to be used by ItemUpdate
 * to determine which bitstreams in an Item
 * acceptable for removal.
 *
 */
public abstract class BitstreamFilter {

    /** Configuration properties loaded by initProperties(); null until then. */
    protected Properties props = null;

    /**
     * The filter method
     *
     * @param bitstream the bitstream to test
     * @return whether the bitstream matches the criteria
     * @throws BitstreamFilterException
     */
    public abstract boolean accept(Bitstream bitstream) throws BitstreamFilterException;

    /**
     * Load filter configuration from a properties file.
     *
     * @param filepath - The complete path for the properties file
     * @throws IOException if the file cannot be opened or read
     */
    public void initProperties(String filepath)
            throws IOException
    {
        props = new Properties();

        // If the constructor throws there is nothing to close, matching the
        // original null-checked close behavior.
        InputStream input = new FileInputStream(filepath);
        try
        {
            props.load(input);
        }
        finally
        {
            input.close();
        }
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.itemupdate;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;
/**
 * Container for UpdateActions
 * Order of actions is very import for correct processing. This implementation
 * supports an iterator that returns the actions in the order in which they are
 * put in. Adding the same action a second time has no effect on this order.
 *
 *
 */
public class ActionManager implements Iterable<UpdateAction> {

    // LinkedHashMap preserves registration order for iteration
    private Map<Class<? extends UpdateAction>, UpdateAction> registry
            = new LinkedHashMap<Class<? extends UpdateAction>, UpdateAction>();

    /**
     * Return the (single) instance for the given action class, creating and
     * registering it on first request.
     *
     * @param actionClass the concrete UpdateAction class wanted
     * @return the cached or newly created action instance
     * @throws InstantiationException
     * @throws IllegalAccessException
     */
    public UpdateAction getUpdateAction(Class<? extends UpdateAction> actionClass)
            throws InstantiationException, IllegalAccessException
    {
        UpdateAction cached = registry.get(actionClass);
        if (cached == null)
        {
            cached = actionClass.newInstance();
            registry.put(actionClass, cached);
        }
        return cached;
    }

    /**
     * @return whether any actions have been registered with this manager
     */
    public boolean hasActions()
    {
        return !registry.isEmpty();
    }

    /**
     * This implementation guarantees the iterator order is the same as the order
     * in which updateActions have been added
     *
     * @return read-only iterator over the registered UpdateActions
     */
    public Iterator<UpdateAction> iterator()
    {
        // values() of a LinkedHashMap iterates in key-insertion order
        final Iterator<UpdateAction> delegate = registry.values().iterator();
        return new Iterator<UpdateAction>()
        {
            public boolean hasNext()
            {
                return delegate.hasNext();
            }

            public UpdateAction next()
            {
                return delegate.next();
            }

            //not supported
            public void remove()
            {
                throw new UnsupportedOperationException();
            }
        };
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.itemupdate;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileWriter;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.InputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.io.PrintWriter;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerException;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.TransformerConfigurationException;
import org.apache.log4j.Logger;
import org.dspace.content.ItemIterator;
import org.dspace.authorize.AuthorizeException;
import org.dspace.content.DSpaceObject;
import org.dspace.content.Item;
import org.dspace.core.Context;
import org.dspace.handle.HandleManager;
import org.w3c.dom.Document;
/**
 * Encapsulates the Item in the context of the DSpace Archive Format
 *
 */
public class ItemArchive {
    private static final Logger log = Logger.getLogger(ItemArchive.class);

    /** Name of the metadata file expected in each item directory. */
    public static final String DUBLIN_CORE_XML = "dublin_core.xml";

    // Lazily-created shared XML helpers.
    // NOTE(review): DocumentBuilder/Transformer are not thread-safe; this assumes
    // single-threaded command-line use -- confirm before reusing concurrently.
    private static DocumentBuilder builder = null;
    private static Transformer transformer = null;

    /** Metadata loaded from this item's dublin_core.xml. */
    private List<DtoMetadata> dtomList = null;

    /** Metadata recorded to support an undo of this update. */
    private List<DtoMetadata> undoDtomList = new ArrayList<DtoMetadata>();

    private List<Integer> undoAddContents = new ArrayList<Integer>(); // for undo of add

    private Item item;
    private File dir; // directory name in source archive for this item
    private String dirname; //convenience

    //constructors
    private ItemArchive()
    {
        // use the create() factory method
    }

    /** factory method
     *
     * Minimal requirements for dublin_core.xml for this application
     * is the presence of dc.identifier.uri
     * which must contain the handle for the item
     *
     * @param context - The DSpace context
     * @param dir - The directory File in the source archive
     * @param itemField - The metadata field in which the Item identifier is located
     *        if null, the default is the handle in the dc.identifier.uri field
     *
     */
    public static ItemArchive create(Context context, File dir, String itemField)
            throws Exception
    {
        ItemArchive itarch = new ItemArchive();
        itarch.dir = dir;
        itarch.dirname = dir.getName();

        InputStream is = null;
        try
        {
            is = new FileInputStream(new File(dir, DUBLIN_CORE_XML));
            itarch.dtomList = MetadataUtilities.loadDublinCore(getDocumentBuilder(), is);
        }
        finally
        {
            if (is != null)
            {
                is.close();
            }
        }
        ItemUpdate.pr("Loaded metadata with " + itarch.dtomList.size() + " fields");

        if (itemField == null)
        {
            itarch.item = itarch.itemFromHandleInput(context); // sets the item instance var and seeds the undo list
        }
        else
        {
            itarch.item = itarch.itemFromMetadataField(context, itemField);
        }

        if (itarch.item == null)
        {
            throw new Exception("Item not instantiated: " + itarch.dirname);
        }

        ItemUpdate.prv("item instantiated: " + itarch.item.getHandle());
        return itarch;
    }

    // Lazily create and cache the shared DocumentBuilder.
    private static DocumentBuilder getDocumentBuilder()
            throws ParserConfigurationException
    {
        if (builder == null)
        {
            builder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
        }
        return builder;
    }

    // Lazily create and cache the shared identity Transformer.
    private static Transformer getTransformer()
            throws TransformerConfigurationException
    {
        if (transformer == null)
        {
            transformer = TransformerFactory.newInstance().newTransformer();
        }
        return transformer;
    }

    /**
     * Getter for the DSpace item referenced in the archive
     * @return DSpace item
     */
    public Item getItem()
    {
        return item;
    }

    /**
     * Getter for directory in archive on disk
     * @return directory in archive
     */
    public File getDirectory()
    {
        return dir;
    }

    /**
     * Getter for directory name in archive
     * @return directory name in archive
     */
    public String getDirectoryName()
    {
        return dirname;
    }

    /**
     * Add metadata field to undo list
     * @param dtom
     */
    public void addUndoMetadataField(DtoMetadata dtom)
    {
        this.undoDtomList.add(dtom);
    }

    /**
     * Getter for list of metadata fields
     * @return list of metadata fields
     */
    public List<DtoMetadata> getMetadataFields()
    {
        return dtomList;
    }

    /**
     * Add bitstream id to delete contents file
     * @param bitstreamId
     */
    public void addUndoDeleteContents(int bitstreamId)
    {
        this.undoAddContents.add(bitstreamId);
    }

    /**
     * Obtain item from DSpace based on handle
     * This is the default implementation
     * that uses the dc.identifier.uri metadatafield
     * that contains the item handle as its value
     *
     */
    private Item itemFromHandleInput(Context context)
            throws SQLException, Exception
    {
        DtoMetadata dtom = getMetadataField("dc.identifier.uri");
        if (dtom == null)
        {
            // FIX: message previously misspelled the field as "dc.identier.uri"
            throw new Exception("No dc.identifier.uri field found for handle");
        }

        this.addUndoMetadataField(dtom); //seed the undo list with the uri

        String uri = dtom.value;

        if (!uri.startsWith(ItemUpdate.HANDLE_PREFIX))
        {
            throw new Exception("dc.identifier.uri for item " + uri
                    + " does not begin with prefix: " + ItemUpdate.HANDLE_PREFIX);
        }

        String handle = uri.substring(ItemUpdate.HANDLE_PREFIX.length());

        DSpaceObject dso = HandleManager.resolveToObject(context, handle);
        if (dso instanceof Item)
        {
            item = (Item) dso;
        }
        else
        {
            ItemUpdate.pr("Warning: item not instantiated");
            throw new IllegalArgumentException("Item " + handle + " not instantiated.");
        }
        return item;
    }

    /**
     * Find and instantiate Item from the dublin_core.xml based
     * on the specified itemField for the item identifier,
     *
     *
     * @param context - the DSpace context
     * @param itemField - the compound form of the metadata element <schema>.<element>.<qualifier>
     * @throws SQLException
     * @throws Exception if zero or more than one item matches the identifier
     */
    private Item itemFromMetadataField(Context context, String itemField)
            throws SQLException, AuthorizeException, Exception
    {
        DtoMetadata dtom = getMetadataField(itemField);

        Item item = null;

        if (dtom == null)
        {
            throw new IllegalArgumentException("No field found for item identifier field: " + itemField);
        }
        ItemUpdate.prv("Metadata field to match for item: " + dtom.toString());

        this.addUndoMetadataField(dtom); //seed the undo list with the identifier field

        ItemIterator itr = Item.findByMetadataField(context, dtom.schema, dtom.element, dtom.qualifier, dtom.value);
        int count = 0;
        while (itr.hasNext())
        {
            item = itr.next();
            count++;
        }
        itr.close();

        ItemUpdate.prv("items matching = " + count );

        if (count != 1)
        {
            throw new Exception ("" + count + " items matching item identifier: " + dtom.value);
        }

        return item;
    }

    // Find the first loaded metadata field matching the compound form (strict
    // qualifier match); returns null if none.
    private DtoMetadata getMetadataField(String compoundForm)
    {
        for (DtoMetadata dtom : dtomList)
        {
            if (dtom.matches(compoundForm, false))
            {
                return dtom;
            }
        }
        return null;
    }

    /**
     * write undo directory and files to Disk in archive format
     *
     *
     * @param undoDir - the root directory of the undo archive
     */
    public void writeUndo(File undoDir)
            throws IOException, ParserConfigurationException, TransformerConfigurationException,
            TransformerException, FileNotFoundException
    {
        // create directory for item
        File dir = new File(undoDir, dirname);
        if (!dir.exists() && !dir.mkdir())
        {
            log.error("Unable to create undo directory");
        }

        OutputStream out = null;
        try
        {
            // FIX: use the DUBLIN_CORE_XML constant instead of a duplicate literal
            out = new FileOutputStream(new File(dir, DUBLIN_CORE_XML));
            Document doc = MetadataUtilities.writeDublinCore(getDocumentBuilder(), undoDtomList);
            MetadataUtilities.writeDocument(doc, getTransformer(), out);

            // if undo has delete bitstream
            if (undoAddContents.size() > 0)
            {
                PrintWriter pw = null;
                try
                {
                    File f = new File(dir, ItemUpdate.DELETE_CONTENTS_FILE);
                    pw = new PrintWriter(new BufferedWriter(new FileWriter(f)));
                    for (Integer i : undoAddContents)
                    {
                        pw.println(i);
                    }
                }
                finally
                {
                    // FIX: pw is null if the FileWriter constructor threw;
                    // closing unconditionally caused an NPE that masked the real error
                    if (pw != null)
                    {
                        pw.close();
                    }
                }
            }
        }
        finally
        {
            if (out != null)
            {
                out.close();
            }
        }
    }
} //end class
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.itemupdate;
import java.sql.SQLException;
import org.dspace.content.Bitstream;
import org.dspace.content.Bundle;
/**
 * Filter all bitstreams in the ORIGINAL bundle
 * Also delete all derivative bitstreams, i.e.
 * all bitstreams in the TEXT and THUMBNAIL bundles
 */
public class OriginalWithDerivativesBitstreamFilter extends BitstreamFilter
{
    /** Bundles whose bitstreams are accepted for removal. */
    private String[] bundlesToEmpty = { "ORIGINAL", "TEXT", "THUMBNAIL" };

    public OriginalWithDerivativesBitstreamFilter()
    {
        //empty
    }

    /**
     * Tests bitstream for membership in specified bundles (ORIGINAL, TEXT, THUMBNAIL)
     *
     * @param bitstream the bitstream to test
     * @throws BitstreamFilterException wrapping any SQLException from the lookup
     * @return true if bitstream is in specified bundles
     */
    public boolean accept(Bitstream bitstream)
            throws BitstreamFilterException
    {
        try
        {
            for (Bundle bundle : bitstream.getBundles())
            {
                for (String targetName : bundlesToEmpty)
                {
                    if (bundle.getName().equals(targetName))
                    {
                        return true;
                    }
                }
            }
        }
        catch (SQLException e)
        {
            throw new BitstreamFilterException(e);
        }
        return false;
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.itemupdate;
import org.dspace.core.Context;
/**
 * Interface for actions to update an item
 *
 * Implementations are registered with {@link ActionManager} and applied to one
 * item archive at a time.
 */
public interface UpdateAction
{
    /**
     * Action to update item
     *
     * @param context the DSpace context under which the update runs
     * @param itarch the item archive (source directory plus loaded metadata) to act on
     * @param isTest if true, only report what would be done; make no changes
     * @param suppressUndo if true, do not record undo information for this action
     * @throws Exception if the update cannot be applied
     */
    public void execute(Context context, ItemArchive itarch, boolean isTest, boolean suppressUndo)
        throws Exception;
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.itemupdate;
import java.text.ParseException;
import org.dspace.authorize.AuthorizeException;
import org.dspace.content.DCValue;
import org.dspace.content.Item;
import org.dspace.core.Context;
/**
 * Action to delete metadata
 *
 * Clears every value of each target field from the item; the removed values
 * are recorded on the undo list unless suppressed.
 */
public class DeleteMetadataAction extends UpdateMetadataAction {

    /**
     * Delete metadata from item
     *
     * @param context DSpace context
     * @param itarch item archive whose item is updated
     * @param isTest report-only mode; no changes made
     * @param suppressUndo skip recording undo information
     * @throws ParseException
     * @throws AuthorizeException
     */
    public void execute(Context context, ItemArchive itarch, boolean isTest,
            boolean suppressUndo) throws AuthorizeException, ParseException
    {
        Item item = itarch.getItem();

        for (String field : targetFields)
        {
            // built only to decompose the compound field name into its parts
            DtoMetadata parsed = DtoMetadata.create(field, Item.ANY, "");
            DCValue[] values = item.getMetadata(field);

            ItemUpdate.pr("Metadata to be deleted: ");
            for (DCValue dcv : values)
            {
                ItemUpdate.pr(" " + MetadataUtilities.getDCValueString(dcv));
            }

            if (!isTest)
            {
                if (!suppressUndo)
                {
                    for (DCValue dcv : values)
                    {
                        itarch.addUndoMetadataField(DtoMetadata.create(dcv.schema, dcv.element,
                                dcv.qualifier, dcv.language, dcv.value));
                    }
                }

                item.clearMetadata(parsed.schema, parsed.element, parsed.qualifier, Item.ANY);
            }
        }
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.itemupdate;
import java.text.ParseException;
import org.dspace.content.Item;
/**
 * A data transfer object class enhancement of org.dspace.content.DCValue, which is deprecated
 * Name intended to not conflict with DSpace API classes for similar concepts but not usable in this context
 *
 * Adds some utility methods
 *
 * Really not at all general enough but supports Dublin Core and the compound form notation <schema>.<element>[.<qualifier>]
 *
 * Does not support wildcard for qualifier
 *
 *
 */
class DtoMetadata
{
    final String schema;
    final String element;
    final String qualifier;
    final String language;
    final String value;

    private DtoMetadata(String schema, String element, String qualifier, String language, String value)
    {
        this.schema = schema;
        this.element = element;
        this.qualifier = qualifier;
        this.language = language;
        this.value = value;
    }

    /**
     * Factory method
     *
     *
     * @param schema not null, not empty - 'dc' is the standard case
     * @param element not null, not empty
     * @param qualifier null; don't allow empty string or * indicating 'any'
     * @param language null or empty
     * @param value the metadata value
     * @return DtoMetadata object
     */
    public static DtoMetadata create(String schema,
                                     String element,
                                     String qualifier,
                                     String language,
                                     String value)
            throws IllegalArgumentException
    {
        // the empty string and the 'any' wildcard are not legal qualifiers here
        if ((qualifier != null) && (qualifier.equals(Item.ANY) || qualifier.equals("")))
        {
            throw new IllegalArgumentException("Invalid qualifier: " + qualifier);
        }
        return new DtoMetadata(schema, element, qualifier, language, value);
    }

    /**
     * Factory method to create metadata object
     *
     *
     * @param compoundForm of the form <schema>.<element>[.<qualifier>]
     * @param language null or empty
     * @param value the metadata value
     */
    public static DtoMetadata create(String compoundForm, String language, String value)
            throws ParseException, IllegalArgumentException
    {
        String[] parts = MetadataUtilities.parseCompoundForm(compoundForm);
        String qualifier = (parts.length > 2) ? parts[2] : null;
        return create(parts[0], parts[1], qualifier, language, value);
    }

    /**
     * Determine if this metadata field matches the specified type:
     * schema.element or schema.element.qualifier
     *
     *
     * @param compoundForm of the form <schema>.<element>[.<qualifier>|.*]
     * @param wildcard allow wildcards in compoundForm param
     * @return whether matches
     */
    public boolean matches(String compoundForm, boolean wildcard)
    {
        String[] parts = compoundForm.split("\\s*\\.\\s*"); //MetadataUtilities.parseCompoundForm(compoundForm);

        if ((parts.length < 2) || (parts.length > 3))
        {
            return false;
        }
        if (!schema.equals(parts[0]) || !element.equals(parts[1]))
        {
            return false;
        }
        if (parts.length == 2)
        {
            // unqualified form only matches an unqualified field
            return qualifier == null;
        }

        // three parts: this field must be qualified, and the qualifier must
        // match exactly (or be the wildcard, when permitted)
        if (qualifier == null)
        {
            return false;
        }
        if (wildcard && parts[2].equals(Item.ANY))
        {
            return true;
        }
        return qualifier.equals(parts[2]);
    }

    public String toString()
    {
        StringBuilder sb = new StringBuilder("\tSchema: ").append(schema).append(" Element: ").append(element);
        if (qualifier != null)
        {
            sb.append(" Qualifier: ").append(qualifier);
        }
        sb.append(" Language: ").append((language == null) ? "[null]" : language);
        sb.append(" Value: ").append(value);
        return sb.toString();
    }

    public String getValue()
    {
        return value;
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.itemupdate;
import java.util.HashSet;
import java.util.Set;
/**
 * This abstract subclass for metadata actions
 * maintains a collection for the target metadata fields
 * expressed as a string in the compound notation ( <schema>.<element>.<qualifier> )
 * on which to apply the action when the method execute is called.
 *
 * Implemented as a Set to avoid problems with duplicates
 *
 *
 */
public abstract class UpdateMetadataAction implements UpdateAction {

    /** Fields (compound notation) this action will operate on; duplicates collapse. */
    protected Set<String> targetFields = new HashSet<String>();

    /**
     * Get target fields
     *
     * @return set of fields to update
     */
    public Set<String> getTargetFields() {
        return targetFields;
    }

    /**
     * Add a set of fields to the update targets.
     *
     * @param targetFields fields in compound notation
     */
    public void addTargetFields(Set<String> targetFields) {
        for (String field : targetFields)
        {
            this.targetFields.add(field);
        }
    }

    /**
     * Add array of target fields to update
     * @param targetFields fields in compound notation
     */
    public void addTargetFields(String[] targetFields) {
        for (String field : targetFields)
        {
            this.targetFields.add(field);
        }
    }

    /**
     * Add single field to update
     *
     * @param targetField field in compound notation
     */
    public void addTargetField(String targetField) {
        this.targetFields.add(targetField);
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.itemupdate;
import java.io.IOException;
import java.sql.SQLException;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.List;
import org.dspace.authorize.AuthorizeException;
import org.dspace.content.Bitstream;
import org.dspace.content.Bundle;
import org.dspace.content.DCDate;
import org.dspace.content.Item;
import org.dspace.core.Context;
/**
 * Action to delete bitstreams using a specified filter implementing BitstreamFilter
 * Derivatives for the target bitstreams are not deleted.
 *
 * The dc.description.provenance field is amended to reflect the deletions
 *
 * Note: Multiple filters are impractical if trying to manage multiple properties files
 * in a commandline environment
 *
 *
 */
public class DeleteBitstreamsByFilterAction extends UpdateBitstreamsAction {

    private BitstreamFilter filter;

    /**
     * Set filter
     *
     * @param filter the filter that selects bitstreams for deletion
     */
    public void setBitstreamFilter(BitstreamFilter filter)
    {
        this.filter = filter;
    }

    /**
     * Get filter
     * @return filter
     */
    public BitstreamFilter getBitstreamFilter()
    {
        return this.filter;
    }

    /**
     * Delete bitstream
     *
     * Removes every bitstream accepted by the configured filter from all of
     * the item's bundles, then appends a provenance note listing the
     * non-derivative deletions.
     *
     * @param context DSpace context
     * @param itarch item archive whose item is updated
     * @param isTest report-only mode; no changes made
     * @param suppressUndo unused here
     * @throws IllegalArgumentException
     * @throws ParseException
     * @throws IOException
     * @throws AuthorizeException
     * @throws SQLException
     */
    public void execute(Context context, ItemArchive itarch, boolean isTest,
            boolean suppressUndo) throws AuthorizeException,
            BitstreamFilterException, IOException, ParseException, SQLException
    {
        List<String> deletedNames = new ArrayList<String>();
        Item item = itarch.getItem();

        for (Bundle bundle : item.getBundles())
        {
            String bundleName = bundle.getName();

            for (Bitstream bitstream : bundle.getBitstreams())
            {
                if (!filter.accept(bitstream))
                {
                    continue;
                }

                if (isTest)
                {
                    ItemUpdate.pr("Delete from bundle " + bundleName + " bitstream " + bitstream.getName()
                            + " with id = " + bitstream.getID());
                }
                else
                {
                    //provenance is not maintained for derivative bitstreams
                    if (!bundleName.equals("THUMBNAIL") && !bundleName.equals("TEXT"))
                    {
                        deletedNames.add(bitstream.getName());
                    }
                    bundle.removeBitstream(bitstream);
                    ItemUpdate.pr("Deleted " + bundleName + " bitstream " + bitstream.getName()
                            + " with id = " + bitstream.getID());
                }
            }
        }

        if (alterProvenance && !deletedNames.isEmpty())
        {
            StringBuilder sb = new StringBuilder(" Bitstreams deleted on ");
            sb.append(DCDate.getCurrent()).append(": ");

            for (String name : deletedNames)
            {
                sb.append(name).append(", ");
            }

            DtoMetadata dtom = DtoMetadata.create("dc.description.provenance", "en", "");

            ItemUpdate.pr("Append provenance with: " + sb.toString());

            if (!isTest)
            {
                MetadataUtilities.appendMetadata(item, dtom, false, sb.toString());
            }
        }
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.itemupdate;
import java.sql.SQLException;
import org.dspace.authorize.AuthorizeException;
import org.dspace.content.DCValue;
import org.dspace.content.Item;
import org.dspace.content.MetadataField;
import org.dspace.content.MetadataSchema;
import org.dspace.core.Context;
/**
 * Action to add metadata to item
 *
 * Each metadata field from the source archive that matches a target field is
 * added to the item, unless an identical value is already present.
 */
public class AddMetadataAction extends UpdateMetadataAction {

    /**
     * Adds metadata specified in the source archive
     *
     * @param context DSpace context
     * @param itarch item archive supplying the new metadata
     * @param isTest report-only mode; validates fields against the registry
     * @param suppressUndo skip recording undo information
     * @throws AuthorizeException
     * @throws SQLException
     */
    public void execute(Context context, ItemArchive itarch, boolean isTest,
            boolean suppressUndo) throws AuthorizeException, SQLException
    {
        Item item = itarch.getItem();
        String dirname = itarch.getDirectoryName();

        for (DtoMetadata dtom : itarch.getMetadataFields())
        {
            for (String targetField : targetFields)
            {
                if (!dtom.matches(targetField, false))
                {
                    continue;
                }

                // match against metadata for this field/value in repository
                // qualifier must be strictly matched, possibly null
                DCValue[] existing = item.getMetadata(dtom.schema, dtom.element, dtom.qualifier, Item.ANY);

                boolean alreadyPresent = false;
                for (DCValue dcv : existing)
                {
                    if (dcv.value.equals(dtom.value))
                    {
                        alreadyPresent = true;
                        break;
                    }
                }

                if (alreadyPresent)
                {
                    ItemUpdate.pr("Warning: No new metadata found to add to item " + dirname
                            + " for element " + targetField);
                }
                else if (isTest)
                {
                    ItemUpdate.pr("Metadata to add: " + dtom.toString());

                    //validity tests that would occur in actual processing
                    // If we're just test the import, let's check that the actual metadata field exists.
                    MetadataSchema foundSchema = MetadataSchema.find(context, dtom.schema);

                    if (foundSchema == null)
                    {
                        ItemUpdate.pr("ERROR: schema '"
                                + dtom.schema + "' was not found in the registry; found on item " + dirname);
                    }
                    else
                    {
                        int schemaID = foundSchema.getSchemaID();
                        MetadataField foundField = MetadataField.findByElement(context, schemaID, dtom.element, dtom.qualifier);

                        if (foundField == null)
                        {
                            ItemUpdate.pr("ERROR: Metadata field: '" + dtom.schema + "." + dtom.element + "."
                                    + dtom.qualifier + "' not found in registry; found on item " + dirname);
                        }
                    }
                }
                else
                {
                    item.addMetadata(dtom.schema, dtom.element, dtom.qualifier, dtom.language, dtom.value);
                    ItemUpdate.pr("Metadata added: " + dtom.toString());

                    if (!suppressUndo)
                    {
                        // add all as a replace record to be preceded by delete
                        for (DCValue dcval : existing)
                        {
                            itarch.addUndoMetadataField(DtoMetadata.create(dcval.schema, dcval.element,
                                    dcval.qualifier, dcval.language, dcval.value));
                        }
                    }
                }

                break; // don't need to check if this field matches any other target fields
            }
        }
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.itemupdate;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.InputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.sql.SQLException;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.List;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.transform.Result;
import javax.xml.transform.Source;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerConfigurationException;
import javax.xml.transform.TransformerException;
import javax.xml.transform.dom.DOMSource;
import javax.xml.transform.stream.StreamResult;
import org.apache.commons.lang.StringUtils;
import org.apache.xpath.XPathAPI;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.NamedNodeMap;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.xml.sax.SAXException;
import org.dspace.authorize.AuthorizeException;
import org.dspace.content.DCValue;
import org.dspace.content.Item;
import org.dspace.content.MetadataSchema;
import org.dspace.core.ConfigurationManager;
/**
* Miscellaneous methods for metadata handling that build on the API
* which might have general utility outside of the specific use
* in context in ItemUpdate.
*
* The XML methods were based on those in ItemImport
*
*
*/
public class MetadataUtilities {

    /**
     * Working around the Item API to delete a value-specific DCValue.
     * For a given schema/element/qualifier (and optionally language):
     * get all DCValues, clear (i.e. delete) all of them, then add them
     * back minus the one whose value matches {@code dtom.value}.
     *
     * @param item the item whose metadata is modified in place
     * @param dtom the metadata field/value to delete
     * @param isLanguageStrict if true, only values with dtom's exact language are considered
     * @return true if metadata field is found with matching value and was deleted
     */
    public static boolean deleteMetadataByValue(Item item, DtoMetadata dtom, boolean isLanguageStrict)
    {
        DCValue[] ar = null;

        if (isLanguageStrict)
        {   // get all for given type, restricted to the exact language
            ar = item.getMetadata(dtom.schema, dtom.element, dtom.qualifier, dtom.language);
        }
        else
        {
            ar = item.getMetadata(dtom.schema, dtom.element, dtom.qualifier, Item.ANY);
        }

        boolean found = false;

        // build new value set minus the one to delete
        List<String> vals = new ArrayList<String>();
        for (DCValue dcv : ar)
        {
            if (dcv.value.equals(dtom.value))
            {
                found = true;
            }
            else
            {
                vals.add(dcv.value);
            }
        }

        if (found)  // remove all for given type ??synchronize this block??
        {
            if (isLanguageStrict)
            {
                item.clearMetadata(dtom.schema, dtom.element, dtom.qualifier, dtom.language);
            }
            else
            {
                item.clearMetadata(dtom.schema, dtom.element, dtom.qualifier, Item.ANY);
            }
            item.addMetadata(dtom.schema, dtom.element, dtom.qualifier, dtom.language, vals.toArray(new String[vals.size()]));
        }
        return found;
    }

    /**
     * Append text to a value of a metadata field on an item.
     * The field's values are cleared and re-added with the text appended
     * to the value at the chosen index (currently always the first one).
     *
     * @param item the item whose metadata is modified in place
     * @param dtom identifies the target schema/element/qualifier/language
     * @param isLanguageStrict if true, only values with dtom's exact language are considered
     * @param textToAppend text appended verbatim to the existing value
     * @throws IllegalArgumentException when the target metadata field is not found
     */
    public static void appendMetadata(Item item, DtoMetadata dtom, boolean isLanguageStrict,
            String textToAppend)
        throws IllegalArgumentException
    {
        DCValue[] ar = null;

        // get all values for given element/qualifier
        if (isLanguageStrict) // get all for given element/qualifier
        {
            ar = item.getMetadata(dtom.schema, dtom.element, dtom.qualifier, dtom.language);
        }
        else
        {
            ar = item.getMetadata(dtom.schema, dtom.element, dtom.qualifier, Item.ANY);
        }

        if (ar.length == 0)
        {
            throw new IllegalArgumentException("Metadata to append to not found");
        }

        int idx = 0; // index of field to change; defaults to the first value
        if (ar.length > 1) // need to pick one, can't be sure it's the last one
        {
            // TODO maybe get highest id ?
        }

        // build new set, appending text to the value at idx
        List<String> vals = new ArrayList<String>();
        for (int i = 0; i < ar.length; i++)
        {
            if (i == idx)
            {
                vals.add(ar[i].value + textToAppend);
            }
            else
            {
                vals.add(ar[i].value);
            }
        }

        if (isLanguageStrict)
        {
            item.clearMetadata(dtom.schema, dtom.element, dtom.qualifier, dtom.language);
        }
        else
        {
            item.clearMetadata(dtom.schema, dtom.element, dtom.qualifier, Item.ANY);
        }
        item.addMetadata(dtom.schema, dtom.element, dtom.qualifier, dtom.language, vals.toArray(new String[vals.size()]));
    }

    /**
     * Modification of method from ItemImporter.loadDublinCore
     * as a Factory method.
     *
     * @param docBuilder DocumentBuilder used to parse the stream
     * @param is InputStream of dublin_core.xml
     * @return list of DtoMetadata representing the metadata fields relating to an Item
     * @throws SQLException
     * @throws IOException
     * @throws ParserConfigurationException
     * @throws SAXException
     * @throws TransformerException
     * @throws AuthorizeException
     */
    public static List<DtoMetadata> loadDublinCore(DocumentBuilder docBuilder, InputStream is)
        throws SQLException, IOException, ParserConfigurationException,
            SAXException, TransformerException, AuthorizeException
    {
        Document document = docBuilder.parse(is);

        List<DtoMetadata> dtomList = new ArrayList<DtoMetadata>();

        // Get the schema; for backward compatibility we will default to the
        // dublin core schema if the schema name is not available in the import file
        String schema = null;
        NodeList metadata = XPathAPI.selectNodeList(document, "/dublin_core");
        Node schemaAttr = metadata.item(0).getAttributes().getNamedItem("schema");
        if (schemaAttr == null)
        {
            schema = MetadataSchema.DC_SCHEMA;
        }
        else
        {
            schema = schemaAttr.getNodeValue();
        }

        // Get the nodes corresponding to formats
        NodeList dcNodes = XPathAPI.selectNodeList(document, "/dublin_core/dcvalue");

        for (int i = 0; i < dcNodes.getLength(); i++)
        {
            Node n = dcNodes.item(i);
            // FIX: check for null BEFORE trimming -- getStringValue() returns
            // null for an element with no text child, and the original code
            // called .trim() first, which would throw a NullPointerException
            // and made the null check below unreachable.
            String value = getStringValue(n);
            // compensate for empty value getting read as "null", which won't display
            if (value == null)
            {
                value = "";
            }
            else
            {
                value = value.trim();
            }

            String element = getAttributeValue(n, "element");
            if (element != null)
            {
                element = element.trim();
            }
            String qualifier = getAttributeValue(n, "qualifier");
            if (qualifier != null)
            {
                qualifier = qualifier.trim();
            }
            String language = getAttributeValue(n, "language");
            if (language != null)
            {
                language = language.trim();
            }

            if ("none".equals(qualifier) || "".equals(qualifier))
            {
                qualifier = null;
            }

            // a goofy default, but consistent with DSpace treatment elsewhere
            if (language == null)
            {
                language = "en";
            }
            else if ("".equals(language))
            {
                language = ConfigurationManager.getProperty("default.language");
            }

            DtoMetadata dtom = DtoMetadata.create(schema, element, qualifier, language, value);
            ItemUpdate.pr(dtom.toString());
            dtomList.add(dtom);
        }
        return dtomList;
    }

    /**
     * Write dublin_core.xml
     *
     * @param docBuilder DocumentBuilder used to create the document
     * @param dtomList metadata fields to serialize as dcvalue elements
     * @return xml document
     * @throws ParserConfigurationException
     * @throws TransformerConfigurationException
     * @throws TransformerException
     */
    public static Document writeDublinCore(DocumentBuilder docBuilder, List<DtoMetadata> dtomList)
        throws ParserConfigurationException, TransformerConfigurationException, TransformerException
    {
        Document doc = docBuilder.newDocument();
        Element root = doc.createElement("dublin_core");
        doc.appendChild(root);

        for (DtoMetadata dtom : dtomList)
        {
            Element mel = doc.createElement("dcvalue");
            mel.setAttribute("element", dtom.element);
            // "none" is the conventional placeholder for a missing qualifier
            if (dtom.qualifier == null)
            {
                mel.setAttribute("qualifier", "none");
            }
            else
            {
                mel.setAttribute("qualifier", dtom.qualifier);
            }
            if (StringUtils.isEmpty(dtom.language))
            {
                mel.setAttribute("language", "en");
            }
            else
            {
                mel.setAttribute("language", dtom.language);
            }
            mel.setTextContent(dtom.value);
            root.appendChild(mel);
        }
        return doc;
    }

    /**
     * Write xml document to output stream.
     *
     * @param doc document to serialize
     * @param transformer transformer performing the serialization
     * @param out destination stream; not closed by this method
     * @throws IOException
     * @throws TransformerException
     */
    public static void writeDocument(Document doc, Transformer transformer, OutputStream out)
        throws IOException, TransformerException
    {
        Source src = new DOMSource(doc);
        Result dest = new StreamResult(out);
        transformer.transform(src, dest);
    }

    // XML utility methods

    /**
     * Lookup an attribute from a DOM node.
     *
     * @param n node whose attributes are searched
     * @param name attribute name to look up
     * @return the attribute value, or the empty string if not present
     */
    private static String getAttributeValue(Node n, String name)
    {
        NamedNodeMap nm = n.getAttributes();
        for (int i = 0; i < nm.getLength(); i++)
        {
            Node node = nm.item(i);
            if (name.equals(node.getNodeName()))
            {
                return node.getNodeValue();
            }
        }
        return "";
    }

    /**
     * Return the String value of a Node.
     *
     * @param node node to read
     * @return text of the first child if it is a text node, otherwise the
     *         node's own value (which may be null, e.g. for element nodes)
     */
    private static String getStringValue(Node node)
    {
        String value = node.getNodeValue();
        if (node.hasChildNodes())
        {
            Node first = node.getFirstChild();
            if (first.getNodeType() == Node.TEXT_NODE)
            {
                return first.getNodeValue();
            }
        }
        return value;
    }

    /**
     * Rewrite of ItemImport's functionality
     * but just the parsing of the file, not the processing of its elements.
     *
     * Blank lines are skipped; each remaining line is parsed into a ContentsEntry.
     *
     * @param f the contents file to read
     * @return list of parsed entries, in file order
     * @throws FileNotFoundException
     * @throws IOException
     * @throws ParseException
     */
    public static List<ContentsEntry> readContentsFile(File f)
        throws FileNotFoundException, IOException, ParseException
    {
        List<ContentsEntry> list = new ArrayList<ContentsEntry>();

        BufferedReader in = null;

        try
        {
            in = new BufferedReader(new FileReader(f));
            String line = null;

            while ((line = in.readLine()) != null)
            {
                line = line.trim();
                if ("".equals(line))
                {
                    continue;
                }
                ItemUpdate.pr("Contents entry: " + line);
                list.add(ContentsEntry.parse(line));
            }
        }
        finally
        {
            // FIX: guard against NPE -- if the FileReader constructor throws,
            // 'in' is still null and an unguarded close() would mask the
            // original FileNotFoundException with a NullPointerException.
            if (in != null)
            {
                try
                {
                    in.close();
                }
                catch (IOException e)
                {
                    //skip
                }
            }
        }

        return list;
    }

    /**
     * Read a delete_contents file: one bitstream ID (integer) per line.
     * Blank lines are skipped; unparseable lines are logged and skipped.
     *
     * @param f the delete_contents file to read
     * @return list of bitstream IDs, in file order
     * @throws FileNotFoundException
     * @throws IOException
     */
    public static List<Integer> readDeleteContentsFile(File f)
        throws FileNotFoundException, IOException
    {
        List<Integer> list = new ArrayList<Integer>();

        BufferedReader in = null;

        try
        {
            in = new BufferedReader(new FileReader(f));
            String line = null;

            while ((line = in.readLine()) != null)
            {
                line = line.trim();
                if ("".equals(line))
                {
                    continue;
                }

                int n = 0;
                try
                {
                    n = Integer.parseInt(line);
                    list.add(n);
                }
                catch (NumberFormatException e)
                {
                    ItemUpdate.pr("Error reading delete contents line:" + e.toString());
                }
            }
        }
        finally
        {
            // FIX: same null guard as readContentsFile (see above).
            if (in != null)
            {
                try
                {
                    in.close();
                }
                catch (IOException e)
                {
                    //skip
                }
            }
        }

        return list;
    }

    /**
     * Get display of DCValue.
     *
     * @param dcv value to render
     * @return string displaying elements of the DCValue
     */
    public static String getDCValueString(DCValue dcv)
    {
        return "schema: " + dcv.schema + "; element: " + dcv.element + "; qualifier: " + dcv.qualifier +
               "; language: " + dcv.language + "; value: " + dcv.value;
    }

    /**
     * Build the compound (dotted) form of a metadata field.
     *
     * @param schema metadata schema name
     * @param element metadata element
     * @param qualifier metadata qualifier; omitted from the result if null
     * @return a String representation of the two- or three-part form of a metadata element
     *         e.g. dc.identifier.uri
     */
    public static String getCompoundForm(String schema, String element, String qualifier)
    {
        StringBuilder sb = new StringBuilder();
        sb.append(schema).append(".").append(element);

        if (qualifier != null)
        {
            sb.append(".").append(qualifier);
        }
        return sb.toString();
    }

    /**
     * Parses metadata field given in the form {@code <schema>.<element>[.<qualifier>|.*]};
     * checks for correct number of elements (2 or 3) and for empty strings.
     *
     * @param compoundForm the dotted field name to parse
     * @return String array of 2 or 3 parts: schema, element[, qualifier]
     * @throws ParseException if validity checks fail
     */
    public static String[] parseCompoundForm(String compoundForm)
        throws ParseException
    {
        String[] ar = compoundForm.split("\\s*\\.\\s*"); //trim ends

        if ("".equals(ar[0]))
        {
            throw new ParseException("schema is empty string: " + compoundForm, 0);
        }

        if ((ar.length < 2) || (ar.length > 3) || "".equals(ar[1]))
        {
            throw new ParseException("element is malformed or empty string: " + compoundForm, 0);
        }

        return ar;
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.itemupdate;
import java.sql.SQLException;
import org.dspace.content.Bitstream;
import org.dspace.content.Bundle;
/**
* BitstreamFilter implementation to filter by bundle name
*
*/
public class BitstreamFilterByBundleName extends BitstreamFilter {

    /** Bundle name to match; lazily loaded from the filter properties. */
    protected String bundleName;

    public BitstreamFilterByBundleName()
    {
        //empty
    }

    /**
     * Filter a bitstream by the bundle name given in the properties file.
     * The name is read once, lazily, from the "bundle" property.
     *
     * @param bitstream the bitstream to test
     * @return whether the bitstream belongs to a bundle with that name
     * @throws BitstreamFilterException if the "bundle" property is absent
     *         or a database error occurs
     */
    public boolean accept(Bitstream bitstream)
        throws BitstreamFilterException
    {
        if (bundleName == null)
        {
            bundleName = props.getProperty("bundle");
            if (bundleName == null)
            {
                throw new BitstreamFilterException("Property 'bundle' not found.");
            }
        }

        try
        {
            for (Bundle bundle : bitstream.getBundles())
            {
                if (bundle.getName().equals(bundleName))
                {
                    return true;
                }
            }
            return false;
        }
        catch (SQLException e)
        {
            throw new BitstreamFilterException(e);
        }
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.itemupdate;
import java.util.regex.*;
import org.dspace.content.Bitstream;
/**
* BitstreamFilter implementation to filter by filename pattern
*
*/
public class BitstreamFilterByFilename extends BitstreamFilter {

    /** Compiled form of {@link #filenameRegex}; built lazily on first use. */
    private Pattern pattern;
    /** Regular expression loaded from the "filename" filter property. */
    private String filenameRegex;

    public BitstreamFilterByFilename()
    {
        //empty
    }

    /**
     * Tests a bitstream by matching the regular expression from the
     * filter properties against the bitstream's name. The expression is
     * read and compiled once, on the first call.
     *
     * @param bitstream the bitstream to test
     * @return whether the bitstream name matches the regular expression
     * @throws BitstreamFilterException if the "filename" property is absent
     */
    public boolean accept(Bitstream bitstream) throws BitstreamFilterException
    {
        if (filenameRegex == null)
        {
            filenameRegex = props.getProperty("filename");
            if (filenameRegex == null)
            {
                throw new BitstreamFilterException("BitstreamFilter property 'filename' not found.");
            }
            pattern = Pattern.compile(filenameRegex);
        }

        return pattern.matcher(bitstream.getName()).matches();
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.itemupdate;
/**
* Exception class for BitstreamFilters
*
*/
/**
 * Exception type thrown by BitstreamFilter implementations when
 * a filter cannot be configured or applied.
 */
public class BitstreamFilterException extends Exception
{
    private static final long serialVersionUID = 1L;

    /** Construct with no detail message. */
    public BitstreamFilterException()
    {
    }

    /**
     * Construct with a detail message.
     *
     * @param msg description of the failure
     */
    public BitstreamFilterException(String msg)
    {
        super(msg);
    }

    /**
     * Construct by wrapping an underlying cause.
     *
     * @param e the exception that triggered this one
     */
    public BitstreamFilterException(Exception e)
    {
        super(e);
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.itemupdate;
import java.sql.SQLException;
import org.dspace.content.Bitstream;
import org.dspace.content.Bundle;
/**
* Filter all bitstreams in the ORIGINAL bundle
* Also delete all derivative bitstreams, i.e.
* all bitstreams in the TEXT and THUMBNAIL bundles
*/
public class OriginalBitstreamFilter extends BitstreamFilterByBundleName
{
    public OriginalBitstreamFilter()
    {
        //empty
    }

    /**
     * Tests a bitstream for containment in the ORIGINAL bundle.
     * Unlike the parent class, the bundle name is fixed rather than
     * read from properties.
     *
     * @param bitstream the bitstream to test
     * @return true if the bitstream is in the ORIGINAL bundle
     * @throws BitstreamFilterException on a database error
     */
    public boolean accept(Bitstream bitstream)
        throws BitstreamFilterException
    {
        try
        {
            for (Bundle bundle : bitstream.getBundles())
            {
                if (bundle.getName().equals("ORIGINAL"))
                {
                    return true;
                }
            }
            return false;
        }
        catch (SQLException e)
        {
            throw new BitstreamFilterException(e);
        }
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.itemupdate;
import java.util.Properties;
/**
* Bitstream filter to delete from TEXT bundle
*
*/
public class DerivativeTextBitstreamFilter extends BitstreamFilterByBundleName {

    /**
     * Configure the inherited filter with a fixed "bundle" property of
     * TEXT, so only bitstreams in the TEXT (extracted full-text) bundle
     * are accepted.
     */
    public DerivativeTextBitstreamFilter()
    {
        Properties p = new Properties();
        p.setProperty("bundle", "TEXT");
        props = p;
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.itemupdate;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FilenameFilter;
import java.io.FileNotFoundException;
import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.PosixParser;
import org.dspace.content.Item;
import org.dspace.core.ConfigurationManager;
import org.dspace.core.Context;
import org.dspace.eperson.EPerson;
/**
*
* Provides some batch editing capabilities for items in DSpace:
* Metadata fields - Add, Delete
* Bitstreams - Add, Delete
*
* The design has been for compatibility with ItemImporter
* in the use of the DSpace archive format which is used to
* specify changes on a per item basis. The directory names
* to correspond to each item are arbitrary and will only be
* used for logging purposes. The reference to the item is
* from a required dc.identifier with the item handle to be
* included in the dublin_core.xml (or similar metadata) file.
*
* Any combination of these actions is permitted in a single run of this class
* The order of actions is important when used in combination.
* It is the responsibility of the calling class (here, ItemUpdate)
* to register UpdateAction classes in the order to which they are
* to be performed.
*
*
* It is unfortunate that so much code needs to be borrowed
* from ItemImport as it is not reusable in private methods, etc.
* Some of this has been placed into the MetadataUtilities class
* for possible reuse elsewhere.
*
*
* @author W. Hays based on a conceptual design by R. Rodgers
*
*/
public class ItemUpdate {

    /** Marker file that suppresses writing an undo archive (prevents undo of undo). */
    public static final String SUPPRESS_UNDO_FILENAME = "suppress_undo";

    /** Per-item file listing bitstreams to add. */
    public static final String CONTENTS_FILE = "contents";
    /** Per-item file listing bitstream IDs to delete. */
    public static final String DELETE_CONTENTS_FILE = "delete_contents";

    /** Canonical handle prefix; resolved from configuration at startup. */
    public static String HANDLE_PREFIX = null;
    /** Short aliases accepted on the command line for bitstream filter classes. */
    public static final Map<String, String> filterAliases = new HashMap<String, String>();

    public static boolean verbose = false;

    static
    {
        filterAliases.put("ORIGINAL", "org.dspace.app.itemupdate.OriginalBitstreamFilter");
        filterAliases.put("ORIGINAL_AND_DERIVATIVES", "org.dspace.app.itemupdate.OriginalWithDerivativesBitstreamFilter");
        filterAliases.put("TEXT", "org.dspace.app.itemupdate.DerivativeTextBitstreamFilter");
        filterAliases.put("THUMBNAIL", "org.dspace.app.itemupdate.ThumbnailBitstreamFilter");
    }

    // File listing filter to check for folders
    static FilenameFilter directoryFilter = new FilenameFilter()
    {
        public boolean accept(File dir, String n)
        {
            File f = new File(dir.getAbsolutePath() + File.separatorChar + n);
            return f.isDirectory();
        }
    };

    // File listing filter to check for files (not directories)
    static FilenameFilter fileFilter = new FilenameFilter()
    {
        public boolean accept(File dir, String n)
        {
            File f = new File(dir.getAbsolutePath() + File.separatorChar + n);
            return (f.isFile());
        }
    };

    // instance variables
    private ActionManager actionMgr = new ActionManager();
    // command-line fragments that would undo the requested actions
    private List<String> undoActionList = new ArrayList<String>();
    private String eperson;

    /**
     * Command-line entry point: parses options, registers the requested
     * update actions, then processes the source archive. Exits the JVM
     * with status 0 on success, 1 on error.
     *
     * @param argv command-line arguments (see -h for usage)
     */
    public static void main(String[] argv)
    {
        // create an options object and populate it
        CommandLineParser parser = new PosixParser();

        Options options = new Options();

        //processing basis for determining items
        //item-specific changes with metadata in source directory with dublin_core.xml files
        options.addOption("s", "source", true, "root directory of source dspace archive ");

        //actions on items
        options.addOption("a", "addmetadata", true, "add metadata specified for each item; multiples separated by semicolon ';'");
        options.addOption("d", "deletemetadata", true, "delete metadata specified for each item");
        options.addOption("A", "addbitstreams", false, "add bitstreams as specified for each item");

        // extra work to get optional argument
        Option delBitstreamOption = new Option("D", "deletebitstreams", true, "delete bitstreams as specified for each item");
        delBitstreamOption.setOptionalArg(true);
        delBitstreamOption.setArgName("BitstreamFilter");
        options.addOption(delBitstreamOption);

        //other params
        options.addOption("e", "eperson", true, "email of eperson doing the update");
        options.addOption("i", "itemfield", true, "optional metadata field that containing item identifier; default is dc.identifier.uri");
        options.addOption("F", "filter-properties", true, "filter class name; only for deleting bitstream");
        options.addOption("v", "verbose", false, "verbose logging");

        //special run states
        options.addOption("t", "test", false, "test run - do not actually import items");
        options.addOption("P", "provenance", false, "suppress altering provenance field for bitstream changes");
        options.addOption("h", "help", false, "help");

        int status = 0;
        boolean isTest = false;
        boolean alterProvenance = true;
        String itemField = null;
        String metadataIndexName = null;

        Context context = null;
        ItemUpdate iu = new ItemUpdate();

        try
        {
            CommandLine line = parser.parse(options, argv);

            if (line.hasOption('h'))
            {
                HelpFormatter myhelp = new HelpFormatter();
                myhelp.printHelp("ItemUpdate", options);
                pr("");
                pr("Examples:");
                pr("  adding metadata:     ItemUpdate -e jsmith@mit.edu -s sourcedir -a dc.contributor -a dc.subject ");
                pr("  deleting metadata:   ItemUpdate -e jsmith@mit.edu -s sourcedir -d dc.description.other");
                pr("  adding bitstreams:   ItemUpdate -e jsmith@mit.edu -s sourcedir -A -i dc.identifier");
                pr("  deleting bitstreams: ItemUpdate -e jsmith@mit.edu -s sourcedir -D ORIGINAL ");
                pr("");
                System.exit(0);
            }

            if (line.hasOption('v'))
            {
                verbose = true;
            }

            if (line.hasOption('P'))
            {
                alterProvenance = false;
                pr("Suppressing changes to Provenance field option");
            }

            iu.eperson = line.getOptionValue('e'); // db ID or email

            if (!line.hasOption('s')) // item specific changes from archive dir
            {
                pr("Missing source archive option");
                System.exit(1);
            }
            String sourcedir = line.getOptionValue('s');

            if (line.hasOption('t')) //test
            {
                isTest = true;
                pr("**Test Run** - not actually updating items.");
            }

            if (line.hasOption('i'))
            {
                itemField = line.getOptionValue('i');
            }

            if (line.hasOption('d'))
            {
                String[] targetFields = line.getOptionValues('d');

                DeleteMetadataAction delMetadataAction = (DeleteMetadataAction) iu.actionMgr.getUpdateAction(DeleteMetadataAction.class);
                delMetadataAction.addTargetFields(targetFields);

                //undo is an add
                for (String field : targetFields)
                {
                    iu.undoActionList.add(" -a " + field + " ");
                }

                pr("Delete metadata for fields: ");
                for (String s : targetFields)
                {
                    pr("    " + s);
                }
            }

            if (line.hasOption('a'))
            {
                String[] targetFields = line.getOptionValues('a');

                AddMetadataAction addMetadataAction = (AddMetadataAction) iu.actionMgr.getUpdateAction(AddMetadataAction.class);
                addMetadataAction.addTargetFields(targetFields);

                //undo is a delete followed by an add of a replace record for target fields
                for (String field : targetFields)
                {
                    iu.undoActionList.add(" -d " + field + " ");
                }

                for (String field : targetFields)
                {
                    iu.undoActionList.add(" -a " + field + " ");
                }

                pr("Add metadata for fields: ");
                for (String s : targetFields)
                {
                    pr("    " + s);
                }
            }

            if (line.hasOption('D')) // undo not supported
            {
                pr("Delete bitstreams ");

                String[] filterNames = line.getOptionValues('D');
                if ((filterNames != null) && (filterNames.length > 1))
                {
                    pr("Error: Only one filter can be a used at a time.");
                    System.exit(1);
                }

                String filterName = line.getOptionValue('D');
                pr("Filter argument: " + filterName);

                if (filterName == null) // indicates using delete_contents files
                {
                    DeleteBitstreamsAction delAction = (DeleteBitstreamsAction) iu.actionMgr.getUpdateAction(DeleteBitstreamsAction.class);
                    delAction.setAlterProvenance(alterProvenance);
                }
                else
                {
                    // check if param is on ALIAS list
                    String filterClassname = filterAliases.get(filterName);

                    if (filterClassname == null)
                    {
                        filterClassname = filterName;
                    }

                    BitstreamFilter filter = null;
                    try
                    {
                        Class<?> cfilter = Class.forName(filterClassname);
                        pr("BitstreamFilter class to instantiate: " + cfilter.toString());

                        filter = (BitstreamFilter) cfilter.newInstance(); //unfortunate cast, an erasure consequence
                    }
                    catch (Exception e)
                    {
                        pr("Error:  Failure instantiating bitstream filter class: " + filterClassname);
                        System.exit(1);
                    }

                    String filterPropertiesName = line.getOptionValue('F');
                    if (filterPropertiesName != null) //not always required
                    {
                        try
                        {
                            // TODO try multiple relative locations, e.g. source dir
                            if (!filterPropertiesName.startsWith("/"))
                            {
                                filterPropertiesName = sourcedir + File.separator + filterPropertiesName;
                            }
                            filter.initProperties(filterPropertiesName);
                        }
                        catch (Exception e)
                        {
                            pr("Error:  Failure finding properties file for bitstream filter class: " + filterPropertiesName);
                            System.exit(1);
                        }
                    }

                    DeleteBitstreamsByFilterAction delAction =
                        (DeleteBitstreamsByFilterAction) iu.actionMgr.getUpdateAction(DeleteBitstreamsByFilterAction.class);
                    delAction.setAlterProvenance(alterProvenance);
                    delAction.setBitstreamFilter(filter);
                    //undo not supported
                }
            }

            if (line.hasOption('A'))
            {
                pr("Add bitstreams ");
                AddBitstreamsAction addAction = (AddBitstreamsAction) iu.actionMgr.getUpdateAction(AddBitstreamsAction.class);
                addAction.setAlterProvenance(alterProvenance);

                iu.undoActionList.add(" -D "); // delete_contents file will be written, no arg required
            }

            if (!iu.actionMgr.hasActions())
            {
                pr("Error - an action must be specified");
                System.exit(1);
            }
            else
            {
                pr("Actions to be performed: ");

                for (UpdateAction ua : iu.actionMgr)
                {
                    pr("    " + ua.getClass().getName());
                }
            }

            pr("ItemUpdate - initializing run on " + (new Date()).toString());

            context = new Context();
            iu.setEPerson(context, iu.eperson);
            context.setIgnoreAuthorization(true);

            HANDLE_PREFIX = ConfigurationManager.getProperty("handle.canonical.prefix");
            if (HANDLE_PREFIX == null || HANDLE_PREFIX.length() == 0)
            {
                HANDLE_PREFIX = "http://hdl.handle.net/";
            }

            iu.processArchive(context, sourcedir, itemField, metadataIndexName, alterProvenance, isTest);

            context.complete(); // complete all transactions
            context.setIgnoreAuthorization(false);
        }
        catch (Exception e)
        {
            if (context != null && context.isValid())
            {
                context.abort();
                context.setIgnoreAuthorization(false);
            }

            e.printStackTrace();
            pr(e.toString());
            status = 1;
        }

        if (isTest)
        {
            pr("***End of Test Run***");
        }
        else
        {
            pr("End.");
        }
        System.exit(status);
    }

    /**
     * Process every item directory in the source archive, applying the
     * registered actions to each. Unless suppressed, an undo archive and
     * a companion undo command script are written alongside the source.
     *
     * @param context DSpace context (authorization already bypassed by caller)
     * @param sourceDirPath root of the source archive; one subdirectory per item
     * @param itemField metadata field holding the item identifier, or null for the default
     * @param metadataIndexName unused at present; reserved for index-based lookup
     * @param alterProvenance whether bitstream changes may update the provenance field
     * @param isTest if true, no changes are committed and no undo archive is written
     * @throws Exception on archive-level failure (per-item failures are logged and skipped)
     */
    private void processArchive(Context context, String sourceDirPath, String itemField,
            String metadataIndexName, boolean alterProvenance, boolean isTest)
        throws Exception
    {
        // open and process the source directory
        File sourceDir = new File(sourceDirPath);

        // NOTE: no null check on sourceDir -- the File constructor never returns null
        if (!sourceDir.exists() || !sourceDir.isDirectory())
        {
            pr("Error, cannot open archive source directory " + sourceDirPath);
            throw new Exception("error with archive source directory " + sourceDirPath);
        }

        String[] dircontents = sourceDir.list(directoryFilter); //just the names, not the path
        Arrays.sort(dircontents);

        //Undo is suppressed to prevent undo of undo
        boolean suppressUndo = false;
        File fSuppressUndo = new File(sourceDir, SUPPRESS_UNDO_FILENAME);
        if (fSuppressUndo.exists())
        {
            suppressUndo = true;
        }

        File undoDir = null; //sibling directory of source archive
        if (!suppressUndo && !isTest)
        {
            undoDir = initUndoArchive(sourceDir);
        }

        int itemCount = 0;
        int successItemCount = 0;

        for (String dirname : dircontents)
        {
            itemCount++;
            pr("");
            pr("processing item " + dirname);

            try
            {
                ItemArchive itarch = ItemArchive.create(context, new File(sourceDir, dirname), itemField);

                for (UpdateAction action : actionMgr)
                {
                    pr("action: " + action.getClass().getName());
                    action.execute(context, itarch, isTest, suppressUndo);
                    if (!isTest && !suppressUndo)
                    {
                        itarch.writeUndo(undoDir);
                    }
                }
                if (!isTest)
                {
                    Item item = itarch.getItem();
                    item.update(); //need to update before commit
                    context.commit();
                    item.decache();
                }
                ItemUpdate.pr("Item " + dirname + " completed");
                successItemCount++;
            }
            catch (Exception e)
            {
                // per-item failure: log and continue with the next item
                pr("Exception processing item " + dirname + ": " + e.toString());
            }
        }

        if (!suppressUndo && !isTest)
        {
            StringBuilder sb = new StringBuilder("dsrun org.dspace.app.itemupdate.ItemUpdate ");
            sb.append(" -e ").append(this.eperson);
            sb.append(" -s ").append(undoDir);

            if (itemField != null)
            {
                sb.append(" -i ").append(itemField);
            }

            if (!alterProvenance)
            {
                sb.append(" -P ");
            }
            if (isTest)
            {
                sb.append(" -t ");
            }

            for (String actionOption : undoActionList)
            {
                sb.append(actionOption);
            }

            PrintWriter pw = null;
            try
            {
                File cmdFile = new File(undoDir.getParent(), undoDir.getName() + "_command.sh");
                pw = new PrintWriter(new BufferedWriter(new FileWriter(cmdFile)));
                pw.println(sb.toString());
            }
            finally
            {
                // FIX: guard against NPE -- if the FileWriter constructor throws,
                // 'pw' is still null and an unguarded close() in this finally
                // would mask the original IOException with a NullPointerException.
                if (pw != null)
                {
                    pw.close();
                }
            }
        }

        pr("");
        pr("Done processing.  Successful items: " + successItemCount + " of " + itemCount + " items in source archive");
        pr("");
    }

    /**
     * Create the undo archive directory as a sibling of the source
     * directory. To avoid overwriting the undo source tree on repeated
     * processing, sequence numbers are added and checked. A suppress-undo
     * marker file is created inside the new directory so that running the
     * undo archive will not itself generate an undo archive.
     *
     * @param sourceDir the original source directory
     * @return the directory of the undo archive
     * @throws FileNotFoundException if the source dir has no parent
     * @throws IOException if the directory or marker file cannot be created
     */
    private File initUndoArchive(File sourceDir)
        throws FileNotFoundException, IOException
    {
        File parentDir = sourceDir.getAbsoluteFile().getParentFile();
        if (parentDir == null)
        {
            throw new FileNotFoundException("Parent directory of archive directory not found; unable to write UndoArchive; no processing performed");
        }

        String sourceDirName = sourceDir.getName();
        int seqNo = 1;

        File undoDir = new File(parentDir, "undo_" + sourceDirName + "_" + seqNo);
        while (undoDir.exists())
        {
            undoDir = new File(parentDir, "undo_" + sourceDirName + "_" + ++seqNo); //increment
        }

        // create root directory
        if (!undoDir.mkdir())
        {
            pr("ERROR creating  Undo Archive directory ");
            throw new IOException("ERROR creating  Undo Archive directory ");
        }

        //Undo is suppressed to prevent undo of undo
        File fSuppressUndo = new File(undoDir, ItemUpdate.SUPPRESS_UNDO_FILENAME);
        try
        {
            fSuppressUndo.createNewFile();
        }
        catch (IOException e)
        {
            pr("ERROR creating Suppress Undo File " + e.toString());
            throw e;
        }
        return undoDir;
    }

    /**
     * Resolve the eperson argument (numeric DB id or email address) and
     * set it as the current user on the context.
     *
     * @param context DSpace context to update
     * @param eperson database ID or email address of the updating eperson
     * @throws Exception if the argument is missing or no matching eperson exists
     */
    private void setEPerson(Context context, String eperson)
        throws Exception
    {
        if (eperson == null)
        {
            pr("Error - an eperson to do the importing must be specified");
            pr(" (run with -h flag for details)");
            throw new Exception("EPerson not specified.");
        }

        EPerson myEPerson = null;

        if (eperson.indexOf('@') != -1)
        {
            // @ sign, must be an email
            myEPerson = EPerson.findByEmail(context, eperson);
        }
        else
        {
            myEPerson = EPerson.find(context, Integer.parseInt(eperson));
        }

        if (myEPerson == null)
        {
            pr("Error, eperson cannot be found: " + eperson);
            throw new Exception("Invalid EPerson");
        }

        context.setCurrentUser(myEPerson);
    }

    /**
     * poor man's logging
     * As with ItemImport, API logging goes through log4j to the DSpace.log files
     * whereas the batch logging goes to the console to be captured there.
     *
     * @param s message to print
     */
    static void pr(String s)
    {
        System.out.println(s);
    }

    /**
     * print if verbose flag is set
     *
     * @param s message to print
     */
    static void prv(String s)
    {
        if (verbose)
        {
            System.out.println(s);
        }
    }
} //end of class
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.itemupdate;
import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.sql.SQLException;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.List;
import org.dspace.authorize.AuthorizeException;
import org.dspace.authorize.AuthorizeManager;
import org.dspace.authorize.ResourcePolicy;
import org.dspace.content.Bitstream;
import org.dspace.content.BitstreamFormat;
import org.dspace.content.Bundle;
import org.dspace.content.DCDate;
import org.dspace.content.FormatIdentifier;
import org.dspace.content.InstallItem;
import org.dspace.content.Item;
import org.dspace.core.Context;
import org.dspace.eperson.Group;
/**
 * Action to add bitstreams listed in an item's contents file to the item
 * in DSpace, as part of the ItemUpdate batch tool.
 */
public class AddBitstreamsAction extends UpdateBitstreamsAction {

    public AddBitstreamsAction()
    {
        //empty
    }

    /**
     * Adds bitstreams from the archive as listed in the contents file.
     * All entries are validated against the files actually present in the
     * archive directory before any bitstream is added.
     *
     * @param context DSpace context
     * @param itarch item archive holding the item and its directory
     * @param isTest if true, validate only; make no changes
     * @param suppressUndo if true, do not record undo information
     * @throws IllegalArgumentException if a file listed in contents is missing
     * @throws ParseException passed through from metadata handling
     * @throws IOException on I/O failure, including an unreadable archive directory
     * @throws AuthorizeException on authorization failure
     * @throws SQLException on database error
     */
    public void execute(Context context, ItemArchive itarch, boolean isTest,
            boolean suppressUndo) throws IllegalArgumentException,
            ParseException, IOException, AuthorizeException, SQLException
    {
        Item item = itarch.getItem();
        File dir = itarch.getDirectory();

        List<ContentsEntry> contents = MetadataUtilities.readContentsFile(new File(dir, ItemUpdate.CONTENTS_FILE));

        if (contents.isEmpty())
        {
            ItemUpdate.pr("Contents is empty - no bitstreams to add");
            return;
        }

        ItemUpdate.pr("Contents bitstream count: " + contents.size());

        String[] files = dir.list(ItemUpdate.fileFilter);
        // File.list() returns null when the directory does not exist or cannot
        // be read; fail with a clear message instead of a NullPointerException
        if (files == null)
        {
            throw new IOException("Unable to list files in archive directory: " + dir);
        }

        List<String> fileList = new ArrayList<String>();
        for (String filename : files)
        {
            fileList.add(filename);
            ItemUpdate.pr("file: " + filename);
        }

        // validate up front: every contents entry must match a file in the archive
        for (ContentsEntry ce : contents)
        {
            //validate match to existing file in archive
            if (!fileList.contains(ce.filename))
            {
                throw new IllegalArgumentException("File listed in contents is missing: " + ce.filename);
            }
        }

        //now okay to add
        for (ContentsEntry ce : contents)
        {
            addBitstream(context, itarch, item, dir, ce, suppressUndo, isTest);
        }
    }

    /**
     * Add one bitstream described by a contents-file entry, assigning it to
     * the named bundle (or the conventional default), guessing its format,
     * and applying any resource policy requested by the entry.
     *
     * @param context DSpace context
     * @param itarch item archive (used to record undo information)
     * @param item item the bitstream is added to
     * @param dir archive directory containing the bitstream file
     * @param ce contents-file entry describing the bitstream
     * @param suppressUndo if true, do not record undo information
     * @param isTest if true, validate only; make no changes
     */
    private void addBitstream(Context context, ItemArchive itarch, Item item, File dir,
            ContentsEntry ce, boolean suppressUndo, boolean isTest)
        throws IOException, IllegalArgumentException, SQLException, AuthorizeException, ParseException
    {
        ItemUpdate.pr("contents entry for bitstream: " + ce.toString());
        File f = new File(dir, ce.filename);

        // open the input stream; closed in the finally block below so it is
        // not leaked in test mode or when an exception is thrown
        BufferedInputStream bis = new BufferedInputStream(new FileInputStream(f));
        try
        {
            Bitstream bs = null;
            String newBundleName = ce.bundlename;

            if (ce.bundlename == null)  // should be required but default convention established
            {
                if (ce.filename.equals("license.txt"))
                {
                    newBundleName = "LICENSE";
                }
                else
                {
                    newBundleName = "ORIGINAL";
                }
            }
            ItemUpdate.pr(" Bitstream " + ce.filename + " to be added to bundle: " + newBundleName);

            if (!isTest)
            {
                // find the bundle
                Bundle[] bundles = item.getBundles(newBundleName);
                Bundle targetBundle = null;

                if (bundles.length < 1)
                {
                    // not found, create a new one
                    targetBundle = item.createBundle(newBundleName);
                }
                else
                {
                    //verify bundle + name are not duplicates
                    for (Bundle b : bundles)
                    {
                        Bitstream[] bitstreams = b.getBitstreams();
                        for (Bitstream bsm : bitstreams)
                        {
                            if (bsm.getName().equals(ce.filename))
                            {
                                throw new IllegalArgumentException("Duplicate bundle + filename cannot be added: "
                                        + b.getName() + " + " + bsm.getName());
                            }
                        }
                    }

                    // select first bundle
                    targetBundle = bundles[0];
                }

                bs = targetBundle.createBitstream(bis);
                bs.setName(ce.filename);

                // Identify the format
                // FIXME - guessing format guesses license.txt incorrectly as a text file format!
                BitstreamFormat fmt = FormatIdentifier.guessFormat(context, bs);
                bs.setFormat(fmt);

                if (ce.description != null)
                {
                    bs.setDescription(ce.description);
                }

                if ((ce.permissionsActionId != -1) && (ce.permissionsGroupName != null))
                {
                    Group group = Group.findByName(context, ce.permissionsGroupName);

                    if (group != null)
                    {
                        AuthorizeManager.removeAllPolicies(context, bs);  // remove the default policy
                        ResourcePolicy rp = ResourcePolicy.create(context);
                        rp.setResource(bs);
                        rp.setAction(ce.permissionsActionId);
                        rp.setGroup(group);
                        rp.update();
                    }
                }

                // provenance is not recorded for derivative bundles
                if (alterProvenance && !targetBundle.getName().equals("THUMBNAIL")
                        && !targetBundle.getName().equals("TEXT"))
                {
                    DtoMetadata dtom = DtoMetadata.create("dc.description.provenance", "en", "");

                    String append = "Bitstream added on " + DCDate.getCurrent() + " : "
                            + InstallItem.getBitstreamProvenanceMessage(item);
                    MetadataUtilities.appendMetadata(item, dtom, false, append);
                }

                //update after all changes are applied, even metadata ones
                bs.update();

                if (!suppressUndo)
                {
                    itarch.addUndoDeleteContents(bs.getID());
                }
            }
        }
        finally
        {
            bis.close();
        }
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.itemupdate;
import java.util.Properties;
/**
 * Bitstream filter that selects bitstreams belonging to the THUMBNAIL bundle.
 */
public class ThumbnailBitstreamFilter extends BitstreamFilterByBundleName {

    /** Name of the bundle this filter targets. */
    private static final String TARGET_BUNDLE = "THUMBNAIL";

    /**
     * Construct a filter preconfigured for the THUMBNAIL bundle.
     */
    public ThumbnailBitstreamFilter()
    {
        Properties p = new Properties();
        p.setProperty("bundle", TARGET_BUNDLE);
        props = p;
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.sitemap;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.io.PrintStream;
import java.util.Date;
import java.util.zip.GZIPOutputStream;
/**
 * Base class for creating sitemaps of various kinds. A sitemap consists of one
 * or more files which list significant URLs on a site for search engines to
 * efficiently crawl. Dates of modification may also be included. A sitemap
 * index file that links to each of the sitemap files is also generated. It is
 * this index file that search engines should be directed towards.
 * <P>
 * Provides most of the required functionality, subclasses need just implement a
 * few methods that specify the "boilerplate" and text for including URLs.
 * <P>
 * Typical usage:
 * <pre>
 *   AbstractGenerator g = new FooGenerator(...);
 *   while (...) {
 *       g.addURL(url, date);
 *   }
 *   g.finish();
 * </pre>
 *
 * @author Robert Tansley
 */
public abstract class AbstractGenerator
{
    /** Number of files written so far */
    protected int fileCount;

    /** Number of bytes written to current file.
     *  NOTE(review): this actually counts chars via String.length(); for
     *  multi-byte UTF-8 content the real byte count may be larger, so
     *  getMaxSize() should leave headroom. */
    protected int bytesWritten;

    /** Number of URLs written to current file */
    protected int urlsWritten;

    /** Directory files are written to */
    protected File outputDir;

    /** Current output; null until the first file is started */
    protected PrintStream currentOutput;

    /** Size in bytes of trailing boilerplate */
    private int trailingByteCount;

    /**
     * Initialize this generator to write to the given directory. This must be
     * called by any subclass constructor.
     *
     * @param outputDirIn
     *            directory to write sitemap files to
     */
    public AbstractGenerator(File outputDirIn)
    {
        fileCount = 0;
        outputDir = outputDirIn;
        trailingByteCount = getTrailingBoilerPlate().length();
        currentOutput = null;
    }

    /**
     * Start writing a new sitemap file.
     *
     * @throws IOException
     *             if an error occurs creating the file
     */
    protected void startNewFile() throws IOException
    {
        String lbp = getLeadingBoilerPlate();

        OutputStream fo = new FileOutputStream(new File(outputDir,
                getFilename(fileCount)));

        if (useCompression())
        {
            fo = new GZIPOutputStream(fo);
        }

        currentOutput = new PrintStream(fo);
        currentOutput.print(lbp);
        bytesWritten = lbp.length();
        urlsWritten = 0;
    }

    /**
     * Add the given URL to the sitemap. A new sitemap file is started when the
     * current one would exceed the size or URL-count limit.
     *
     * @param url
     *            Full URL to add
     * @param lastMod
     *            Date URL was last modified, or {@code null}
     * @throws IOException
     *             if an error occurs writing
     */
    public void addURL(String url, Date lastMod) throws IOException
    {
        // Kick things off if this is the first call
        if (currentOutput == null)
        {
            startNewFile();
        }

        String newURLText = getURLText(url, lastMod);

        if (bytesWritten + newURLText.length() + trailingByteCount > getMaxSize()
                || urlsWritten + 1 > getMaxURLs())
        {
            closeCurrentFile();
            startNewFile();
        }

        currentOutput.print(newURLText);
        bytesWritten += newURLText.length();
        urlsWritten++;
    }

    /**
     * Finish with the current sitemap file.
     *
     * @throws IOException
     *             if an error occurs writing
     */
    protected void closeCurrentFile() throws IOException
    {
        currentOutput.print(getTrailingBoilerPlate());
        currentOutput.close();
        fileCount++;
    }

    /**
     * Complete writing sitemap files and write the index files. This is invoked
     * when all calls to {@link AbstractGenerator#addURL(String, Date)} have
     * been completed, and invalidates the generator.
     *
     * @return number of sitemap files written.
     *
     * @throws IOException
     *             if an error occurs writing
     */
    public int finish() throws IOException
    {
        // BUG FIX: if addURL() was never called there is no current file, and
        // closeCurrentFile() would throw a NullPointerException. Start a
        // (boilerplate-only) file so the index always refers to at least one
        // valid sitemap.
        if (currentOutput == null)
        {
            startNewFile();
        }

        closeCurrentFile();

        OutputStream fo = new FileOutputStream(new File(outputDir,
                getIndexFilename()));

        if (useCompression())
        {
            fo = new GZIPOutputStream(fo);
        }

        PrintStream out = new PrintStream(fo);
        writeIndex(out, fileCount);
        out.close();

        return fileCount;
    }

    /**
     * Return marked-up text to be included in a sitemap about a given URL.
     *
     * @param url
     *            URL to add information about
     * @param lastMod
     *            date URL was last modified, or {@code null} if unknown or not
     *            applicable
     * @return the mark-up to include
     */
    public abstract String getURLText(String url, Date lastMod);

    /**
     * Return the boilerplate at the top of a sitemap file.
     *
     * @return The boilerplate markup.
     */
    public abstract String getLeadingBoilerPlate();

    /**
     * Return the boilerplate at the end of a sitemap file.
     *
     * @return The boilerplate markup.
     */
    public abstract String getTrailingBoilerPlate();

    /**
     * Return the maximum size in bytes that an individual sitemap file should
     * be.
     *
     * @return the size in bytes.
     */
    public abstract int getMaxSize();

    /**
     * Return the maximum number of URLs that an individual sitemap file should
     * contain.
     *
     * @return the maximum number of URLs.
     */
    public abstract int getMaxURLs();

    /**
     * Return whether the written sitemap files and index should be
     * GZIP-compressed.
     *
     * @return {@code true} if GZIP compression should be used, {@code false}
     *         otherwise.
     */
    public abstract boolean useCompression();

    /**
     * Return the filename a sitemap at the given index should be stored at.
     *
     * @param number
     *            index of the sitemap file (zero is first).
     * @return the filename to write the sitemap to.
     */
    public abstract String getFilename(int number);

    /**
     * Get the filename the index should be written to.
     *
     * @return the filename of the index.
     */
    public abstract String getIndexFilename();

    /**
     * Write the index file.
     *
     * @param output
     *            stream to write the index to
     * @param sitemapCount
     *            number of sitemaps that were generated
     * @throws IOException
     *             if an IO error occurs
     */
    public abstract void writeIndex(PrintStream output, int sitemapCount)
            throws IOException;
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.sitemap;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Date;
/**
 * Class for generating <a href="http://sitemaps.org/">Sitemaps</a> to improve
 * search engine coverage of the DSpace site and limit the server load caused by
 * crawlers.
 *
 * @author Robert Tansley
 * @author Stuart Lewis
 */
public class SitemapsOrgGenerator extends AbstractGenerator
{
    /** Stem of URLs sitemaps will eventually appear at */
    private String indexURLStem;

    /** Tail of URLs sitemaps will eventually appear at */
    private String indexURLTail;

    /** W3C datetime format; the pattern's literal 'Z' asserts UTC */
    private DateFormat w3dtfFormat = new SimpleDateFormat(
            "yyyy-MM-dd'T'HH:mm:ss'Z'");

    /**
     * Construct a sitemaps.org protocol sitemap generator, writing files to the
     * given directory, and with the sitemaps eventually exposed at starting
     * with the given URL stem and tail.
     *
     * @param outputDirIn
     *            Directory to write sitemap files to
     * @param urlStem
     *            start of URL that sitemap files will appear at, e.g.
     *            {@code http://dspace.myu.edu/sitemap?sitemap=}
     * @param urlTail
     *            end of URL that sitemap files will appear at, e.g.
     *            {@code .html} or {@code null}
     */
    public SitemapsOrgGenerator(File outputDirIn, String urlStem, String urlTail)
    {
        super(outputDirIn);
        indexURLStem = urlStem;
        indexURLTail = (urlTail == null ? "" : urlTail);

        // BUG FIX: the format pattern ends in a literal 'Z' (the UTC zone
        // designator), but SimpleDateFormat defaults to the JVM's local time
        // zone -- so timestamps claimed UTC while holding local time. Force
        // the formatter to UTC to match the 'Z' suffix.
        w3dtfFormat.setTimeZone(java.util.TimeZone.getTimeZone("UTC"));
    }

    public String getFilename(int number)
    {
        return "sitemap" + number + ".xml.gz";
    }

    public String getLeadingBoilerPlate()
    {
        return "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
                + "<urlset xmlns=\"http://www.sitemaps.org/schemas/sitemap/0.9\">";
    }

    public int getMaxSize()
    {
        // 10 Mb
        return 10485760;
    }

    public int getMaxURLs()
    {
        return 50000;
    }

    public String getTrailingBoilerPlate()
    {
        return "</urlset>";
    }

    public String getURLText(String url, Date lastMod)
    {
        StringBuilder urlText = new StringBuilder();

        urlText.append("<url><loc>").append(url).append("</loc>");
        if (lastMod != null)
        {
            urlText.append("<lastmod>").append(w3dtfFormat.format(lastMod))
                    .append("</lastmod>");
        }
        urlText.append("</url>\n");

        return urlText.toString();
    }

    public boolean useCompression()
    {
        return true;
    }

    public String getIndexFilename()
    {
        return "sitemap_index.xml.gz";
    }

    public void writeIndex(PrintStream output, int sitemapCount)
            throws IOException
    {
        String now = w3dtfFormat.format(new Date());

        output.println("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
        output
                .println("<sitemapindex xmlns=\"http://www.sitemaps.org/schemas/sitemap/0.9\">");

        for (int i = 0; i < sitemapCount; i++)
        {
            output.print("<sitemap><loc>" + indexURLStem + i + indexURLTail
                    + "</loc>");
            output.print("<lastmod>" + now + "</lastmod></sitemap>\n");
        }

        output.println("</sitemapindex>");
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.sitemap;
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.UnsupportedEncodingException;
import java.net.HttpURLConnection;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLEncoder;
import java.sql.SQLException;
import java.util.Date;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.cli.PosixParser;
import org.apache.log4j.Logger;
import org.dspace.content.Collection;
import org.dspace.content.Community;
import org.dspace.content.Item;
import org.dspace.content.ItemIterator;
import org.dspace.core.ConfigurationManager;
import org.dspace.core.Context;
import org.dspace.core.LogManager;
/**
 * Command-line utility for generating HTML and Sitemaps.org protocol Sitemaps.
 *
 * @author Robert Tansley
 * @author Stuart Lewis
 */
public class GenerateSitemaps
{
    /** Logger */
    private static Logger log = Logger.getLogger(GenerateSitemaps.class);

    /**
     * Command-line entry point. Run with {@code -h} for the list of options.
     *
     * @param args command-line arguments
     * @throws Exception passed through from sitemap generation
     */
    public static void main(String[] args) throws Exception
    {
        final String usage = GenerateSitemaps.class.getCanonicalName();

        CommandLineParser parser = new PosixParser();
        HelpFormatter hf = new HelpFormatter();

        Options options = new Options();

        options.addOption("h", "help", false, "help");
        options.addOption("s", "no_sitemaps", false,
                "do not generate sitemaps.org protocol sitemap");
        options.addOption("b", "no_htmlmap", false,
                "do not generate a basic HTML sitemap");
        options.addOption("a", "ping_all", false,
                "ping configured search engines");
        options
                .addOption("p", "ping", true,
                        "ping specified search engine URL");

        CommandLine line = null;

        try
        {
            line = parser.parse(options, args);
        }
        catch (ParseException pe)
        {
            hf.printHelp(usage, options);
            System.exit(1);
        }

        if (line.hasOption('h'))
        {
            hf.printHelp(usage, options);
            System.exit(0);
        }

        if (line.getArgs().length != 0)
        {
            hf.printHelp(usage, options);
            System.exit(1);
        }

        /*
         * Sanity check -- if no sitemap generation or pinging to do, print
         * usage. (BUG FIX: this previously tested the nonexistent options
         * 'g', 'm' and 'y' -- which always returned false, making the check
         * always pass -- and it did not account for -a, so "-b -s -a" was
         * wrongly rejected.)
         */
        if (line.hasOption('b') && line.hasOption('s') && !line.hasOption('a')
                && !line.hasOption('p'))
        {
            System.err
                    .println("Nothing to do (no sitemap to generate, no search engines to ping)");
            hf.printHelp(usage, options);
            System.exit(1);
        }

        // Note the negation (CLI options indicate NOT to generate a sitemap)
        if (!line.hasOption('b') || !line.hasOption('s'))
        {
            generateSitemaps(!line.hasOption('b'), !line.hasOption('s'));
        }

        if (line.hasOption('a'))
        {
            pingConfiguredSearchEngines();
        }

        if (line.hasOption('p'))
        {
            try
            {
                pingSearchEngine(line.getOptionValue('p'));
            }
            catch (MalformedURLException me)
            {
                System.err
                        .println("Bad search engine URL (include all except sitemap URL)");
                System.exit(1);
            }
        }

        System.exit(0);
    }

    /**
     * Generate sitemap.org protocol and/or basic HTML sitemaps.
     *
     * @param makeHTMLMap
     *            if {@code true}, generate an HTML sitemap.
     * @param makeSitemapOrg
     *            if {@code true}, generate an sitemap.org sitemap.
     * @throws SQLException
     *             if a database error occurs.
     * @throws IOException
     *             if IO error occurs.
     */
    public static void generateSitemaps(boolean makeHTMLMap,
            boolean makeSitemapOrg) throws SQLException, IOException
    {
        String sitemapStem = ConfigurationManager.getProperty("dspace.url")
                + "/sitemap";
        String htmlMapStem = ConfigurationManager.getProperty("dspace.url")
                + "/htmlmap";
        String handleURLStem = ConfigurationManager.getProperty("dspace.url")
                + "/handle/";

        File outputDir = new File(ConfigurationManager.getProperty("sitemap.dir"));
        if (!outputDir.exists() && !outputDir.mkdir())
        {
            log.error("Unable to create output directory");
        }

        AbstractGenerator html = null;
        AbstractGenerator sitemapsOrg = null;

        if (makeHTMLMap)
        {
            html = new HTMLSitemapGenerator(outputDir, htmlMapStem + "?map=",
                    null);
        }

        if (makeSitemapOrg)
        {
            sitemapsOrg = new SitemapsOrgGenerator(outputDir, sitemapStem
                    + "?map=", null);
        }

        Context c = new Context();

        // All community and collection home pages go into the sitemap
        Community[] comms = Community.findAll(c);

        for (int i = 0; i < comms.length; i++)
        {
            String url = handleURLStem + comms[i].getHandle();

            if (makeHTMLMap)
            {
                html.addURL(url, null);
            }
            if (makeSitemapOrg)
            {
                sitemapsOrg.addURL(url, null);
            }
        }

        Collection[] colls = Collection.findAll(c);

        for (int i = 0; i < colls.length; i++)
        {
            String url = handleURLStem + colls[i].getHandle();

            if (makeHTMLMap)
            {
                html.addURL(url, null);
            }
            if (makeSitemapOrg)
            {
                sitemapsOrg.addURL(url, null);
            }
        }

        // Items carry their last-modified date so crawlers can skip unchanged ones
        ItemIterator allItems = Item.findAll(c);
        try
        {
            int itemCount = 0;

            while (allItems.hasNext())
            {
                Item i = allItems.next();
                String url = handleURLStem + i.getHandle();
                Date lastMod = i.getLastModified();

                if (makeHTMLMap)
                {
                    html.addURL(url, lastMod);
                }
                if (makeSitemapOrg)
                {
                    sitemapsOrg.addURL(url, lastMod);
                }

                i.decache();

                itemCount++;
            }

            if (makeHTMLMap)
            {
                int files = html.finish();
                log.info(LogManager.getHeader(c, "write_sitemap",
                        "type=html,num_files=" + files + ",communities="
                                + comms.length + ",collections=" + colls.length
                                + ",items=" + itemCount));
            }

            if (makeSitemapOrg)
            {
                int files = sitemapsOrg.finish();
                // BUG FIX: this log line previously said "type=html" (copy-paste)
                log.info(LogManager.getHeader(c, "write_sitemap",
                        "type=sitemaps.org,num_files=" + files + ",communities="
                                + comms.length + ",collections=" + colls.length
                                + ",items=" + itemCount));
            }
        }
        finally
        {
            if (allItems != null)
            {
                allItems.close();
            }
        }

        c.abort();
    }

    /**
     * Ping all search engines configured in {@code dspace.cfg}.
     *
     * @throws UnsupportedEncodingException
     *             theoretically should never happen
     */
    public static void pingConfiguredSearchEngines()
            throws UnsupportedEncodingException
    {
        String engineURLProp = ConfigurationManager
                .getProperty("sitemap.engineurls");
        String engineURLs[] = null;

        if (engineURLProp != null)
        {
            engineURLs = engineURLProp.trim().split("\\s*,\\s*");
        }

        if (engineURLProp == null || engineURLs == null
                || engineURLs.length == 0 || engineURLs[0].trim().equals(""))
        {
            log.warn("No search engine URLs configured to ping");
            return;
        }

        for (int i = 0; i < engineURLs.length; i++)
        {
            try
            {
                pingSearchEngine(engineURLs[i]);
            }
            catch (MalformedURLException me)
            {
                log.warn("Bad search engine URL in configuration: "
                        + engineURLs[i]);
            }
        }
    }

    /**
     * Ping the given search engine.
     *
     * @param engineURL
     *            Search engine URL minus protocol etc, e.g.
     *            {@code www.google.com}
     * @throws MalformedURLException
     *             if the passed in URL is malformed
     * @throws UnsupportedEncodingException
     *             theoretically should never happen
     */
    public static void pingSearchEngine(String engineURL)
            throws MalformedURLException, UnsupportedEncodingException
    {
        // Set up HTTP proxy
        if ((ConfigurationManager.getProperty("http.proxy.host") != null)
                && (ConfigurationManager.getProperty("http.proxy.port") != null))
        {
            System.setProperty("proxySet", "true");
            System.setProperty("proxyHost", ConfigurationManager
                    .getProperty("http.proxy.host"));
            // BUG FIX: this previously called System.getProperty(), so the
            // configured proxy port was never actually applied
            System.setProperty("proxyPort", ConfigurationManager
                    .getProperty("http.proxy.port"));
        }

        String sitemapURL = ConfigurationManager.getProperty("dspace.url")
                + "/sitemap";

        URL url = new URL(engineURL + URLEncoder.encode(sitemapURL, "UTF-8"));

        try
        {
            HttpURLConnection connection = (HttpURLConnection) url
                    .openConnection();

            BufferedReader in = new BufferedReader(new InputStreamReader(
                    connection.getInputStream()));

            String inputLine;
            StringBuffer resp = new StringBuffer();
            while ((inputLine = in.readLine()) != null)
            {
                resp.append(inputLine).append("\n");
            }
            in.close();

            if (connection.getResponseCode() == 200)
            {
                log.info("Pinged " + url.toString() + " successfully");
            }
            else
            {
                log.warn("Error response pinging " + url.toString() + ":\n"
                        + resp);
            }
        }
        catch (IOException e)
        {
            log.warn("Error pinging " + url.toString(), e);
        }
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.sitemap;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.util.Date;
/**
 * Class for generating HTML "sitemaps" which contain links to various pages in
 * a DSpace site. This should improve search engine coverage of the DSpace site
 * and limit the server load caused by crawlers.
 *
 * @author Robert Tansley
 * @author Stuart Lewis
 */
public class HTMLSitemapGenerator extends AbstractGenerator
{
    /** Stem of URLs sitemaps will eventually appear at */
    private final String mapURLStem;

    /** Tail of URLs sitemaps will eventually appear at */
    private final String mapURLTail;

    /**
     * Construct an HTML sitemap generator, writing files to the given
     * directory, and with the sitemaps eventually exposed at starting with the
     * given URL stem and tail.
     *
     * @param outputDirIn
     *            Directory to write sitemap files to
     * @param urlStem
     *            start of URL that sitemap files will appear at, e.g.
     *            {@code http://dspace.myu.edu/sitemap?sitemap=}
     * @param urlTail
     *            end of URL that sitemap files will appear at, e.g.
     *            {@code .html} or {@code null}
     */
    public HTMLSitemapGenerator(File outputDirIn, String urlStem, String urlTail)
    {
        super(outputDirIn);
        this.mapURLStem = urlStem;
        this.mapURLTail = (urlTail == null) ? "" : urlTail;
    }

    public String getFilename(int number)
    {
        return "sitemap" + number + ".html";
    }

    public String getLeadingBoilerPlate()
    {
        return "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01//EN\" \"http://www.w3.org/TR/html4/strict.dtd\">\n"
                + "<html><head><title>URL List</title></head><body><ul>";
    }

    public int getMaxSize()
    {
        // 50k
        return 51200;
    }

    public int getMaxURLs()
    {
        return 1000;
    }

    public String getTrailingBoilerPlate()
    {
        return "</ul></body></html>\n";
    }

    public String getURLText(String url, Date lastMod)
    {
        // lastMod is intentionally unused: plain HTML link lists carry no
        // modification-date markup
        return "<li><a href=\"" + url + "\">" + url + "</a></li>\n";
    }

    public boolean useCompression()
    {
        return false;
    }

    public String getIndexFilename()
    {
        return "sitemap_index.html";
    }

    public void writeIndex(PrintStream output, int sitemapCount)
            throws IOException
    {
        output.println(getLeadingBoilerPlate());

        for (int i = 0; i < sitemapCount; i++)
        {
            output.print("<li><a href=\"" + mapURLStem + i + mapURLTail
                    + "\">sitemap " + i + "</a></li>\n");
        }

        output.println(getTrailingBoilerPlate());
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.launcher;
import org.dspace.core.ConfigurationManager;
import org.dspace.servicemanager.DSpaceKernelImpl;
import org.dspace.servicemanager.DSpaceKernelInit;
import org.dspace.services.RequestService;
import org.jdom.Document;
import org.jdom.Element;
import org.jdom.input.SAXBuilder;
import java.util.List;
import java.lang.reflect.Method;
/**
 * A DSpace script launcher.
 *
 * @author Stuart Lewis
 * @author Mark Diggory
 */
public class ScriptLauncher
{
    /** The service manager kernel */
    private static transient DSpaceKernelImpl kernelImpl;

    /**
     * Execute the DSpace script launcher
     *
     * @param args Any parameters required to be passed to the scripts it executes
     */
    public static void main(String[] args)
    {
        // Check that there is at least one argument
        if (args.length < 1)
        {
            System.err.println("You must provide at least one command argument");
            display();
            System.exit(1);
        }

        // Initialise the service manager kernel
        try {
            kernelImpl = DSpaceKernelInit.getKernel(null);
            if (!kernelImpl.isRunning())
            {
                kernelImpl.start(ConfigurationManager.getProperty("dspace.dir"));
            }
        } catch (Exception e)
        {
            // Failed to start so destroy it and log and throw an exception
            try
            {
                // kernelImpl is null here when getKernel() itself threw
                if (kernelImpl != null)
                {
                    kernelImpl.destroy();
                }
            }
            catch (Exception e1)
            {
                // Nothing to do
            }
            String message = "Failure during kernel init: " + e.getMessage();
            System.err.println(message + ":" + e);
            throw new IllegalStateException(message, e);
        }

        // Parse the configuration file looking for the command entered
        Document doc = getConfig();
        String request = args[0];
        Element root = doc.getRootElement();
        List<Element> commands = root.getChildren("command");
        for (Element command : commands)
        {
            if (request.equalsIgnoreCase(command.getChild("name").getValue()))
            {
                // Run each step
                List<Element> steps = command.getChildren("step");
                for (Element step : steps)
                {
                    // Instantiate the class
                    Class target = null;

                    // Is it the special case 'dsrun' where the user provides the class name?
                    String className;
                    if ("dsrun".equals(request))
                    {
                        if (args.length < 2)
                        {
                            System.err.println("Error in launcher.xml: Missing class name");
                            System.exit(1);
                        }
                        className = args[1];
                    }
                    else {
                        className = step.getChild("class").getValue();
                    }
                    try
                    {
                        target = Class.forName(className,
                                               true,
                                               Thread.currentThread().getContextClassLoader());
                    }
                    catch (ClassNotFoundException e)
                    {
                        System.err.println("Error in launcher.xml: Invalid class name: " + className);
                        System.exit(1);
                    }

                    // Strip the leading argument from the args, and add the arguments
                    // Set <passargs>false</passargs> if the arguments should not be passed on
                    String[] useargs = args.clone();
                    Class[] argTypes = {useargs.getClass()};
                    boolean passargs = true;
                    if ((step.getAttribute("passuserargs") != null) &&
                        ("false".equalsIgnoreCase(step.getAttribute("passuserargs").getValue())))
                    {
                        passargs = false;
                    }
                    if ((args.length == 1) || (("dsrun".equals(request)) && (args.length == 2)) || (!passargs))
                    {
                        useargs = new String[0];
                    }
                    else
                    {
                        // The number of arguments to ignore
                        // If dsrun is the command, ignore the next, as it is the class name not an arg
                        int x = 1;
                        if ("dsrun".equals(request))
                        {
                            x = 2;
                        }
                        String[] argsnew = new String[useargs.length - x];
                        for (int i = x; i < useargs.length; i++)
                        {
                            argsnew[i - x] = useargs[i];
                        }
                        useargs = argsnew;
                    }

                    // Add any extra properties configured for the step; these
                    // are prepended ahead of the user-supplied arguments
                    List<Element> bits = step.getChildren("argument");
                    if (step.getChild("argument") != null)
                    {
                        String[] argsnew = new String[useargs.length + bits.size()];
                        int i = 0;
                        for (Element arg : bits)
                        {
                            argsnew[i++] = arg.getValue();
                        }
                        for (; i < bits.size() + useargs.length; i++)
                        {
                            argsnew[i] = useargs[i - bits.size()];
                        }
                        useargs = argsnew;
                    }

                    // Establish the request service startup
                    RequestService requestService = kernelImpl.getServiceManager().getServiceByName(RequestService.class.getName(), RequestService.class);
                    if (requestService == null) {
                        throw new IllegalStateException("Could not get the DSpace RequestService to start the request transaction");
                    }

                    // Establish a request related to the current session
                    // that will trigger the various request listeners
                    requestService.startRequest();

                    // Run the main() method
                    try
                    {
                        Object[] arguments = {useargs};

                        // Useful for debugging, so left in the code...
                        /**System.out.print("About to execute: " + className);
                        for (String param : useargs)
                        {
                            System.out.print(" " + param);
                        }
                        System.out.println("");**/

                        Method main = target.getMethod("main", argTypes);
                        main.invoke(null, arguments);

                        // ensure we close out the request (happy request)
                        requestService.endRequest(null);
                    }
                    catch (Exception e)
                    {
                        // Failure occurred in the request so we destroy it
                        requestService.endRequest(e);

                        if (kernelImpl != null)
                        {
                            kernelImpl.destroy();
                            kernelImpl = null;
                        }

                        // Exceptions from the script are reported as a 'cause'.
                        // BUG FIX: getCause() can be null (e.g. a
                        // NoSuchMethodException thrown directly by getMethod),
                        // which previously caused a NullPointerException while
                        // reporting the error; fall back to the exception itself.
                        Throwable cause = (e.getCause() != null) ? e.getCause() : e;
                        System.err.println("Exception: " + cause.getMessage());
                        cause.printStackTrace();
                        System.exit(1);
                    }
                }

                // Destroy the service kernel
                if (kernelImpl != null)
                {
                    kernelImpl.destroy();
                    kernelImpl = null;
                }

                // Everything completed OK
                System.exit(0);
            }
        }

        // Destroy the service kernel if it is still alive
        if (kernelImpl != null)
        {
            kernelImpl.destroy();
            kernelImpl = null;
        }

        // The command wasn't found
        System.err.println("Command not found: " + args[0]);
        display();
        System.exit(1);
    }

    /**
     * Load the launcher configuration file
     *
     * @return The XML configuration file Document
     */
    private static Document getConfig()
    {
        // Load the launcher configuration file
        String config = ConfigurationManager.getProperty("dspace.dir") +
                        System.getProperty("file.separator") + "config" +
                        System.getProperty("file.separator") + "launcher.xml";
        SAXBuilder saxBuilder = new SAXBuilder();
        Document doc = null;
        try
        {
            doc = saxBuilder.build(config);
        }
        catch (Exception e)
        {
            System.err.println("Unable to load the launcher configuration file: [dspace]/config/launcher.xml");
            System.err.println(e.getMessage());
            System.exit(1);
        }
        return doc;
    }

    /**
     * Display the commands that the current launcher config file knows about
     */
    private static void display()
    {
        Document doc = getConfig();
        List<Element> commands = doc.getRootElement().getChildren("command");
        System.out.println("Usage: dspace [command-name] {parameters}");
        for (Element command : commands)
        {
            System.out.println(" - " + command.getChild("name").getValue() +
                               ": " + command.getChild("description").getValue());
        }
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.mediafilter;
import java.awt.Graphics2D;
import java.awt.image.BufferedImage;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import javax.imageio.ImageIO;
import org.dspace.core.ConfigurationManager;
/**
* Filter image bitstreams, scaling the image to be within the bounds of
* thumbnail.maxwidth, thumbnail.maxheight, the size we want our thumbnail to be
* no bigger than. Creates only JPEGs.
*/
public class JPEGFilter extends MediaFilter implements SelfRegisterInputFormats
{
public String getFilteredName(String oldFilename)
{
return oldFilename + ".jpg";
}
/**
* @return String bundle name
*
*/
public String getBundleName()
{
return "THUMBNAIL";
}
/**
* @return String bitstreamformat
*/
public String getFormatString()
{
return "JPEG";
}
/**
* @return String description
*/
public String getDescription()
{
return "Generated Thumbnail";
}
/**
 * Scale the source image so that neither dimension exceeds
 * thumbnail.maxwidth / thumbnail.maxheight (preserving aspect ratio)
 * and return the result as an in-memory JPEG stream.
 *
 * @param source
 *            source input stream
 *
 * @return InputStream the resulting input stream
 */
public InputStream getDestinationStream(InputStream source)
    throws Exception
{
    // read in bitstream's image
    BufferedImage buf = ImageIO.read(source);
    if (buf == null)
    {
        // ImageIO.read returns null (rather than throwing) when no
        // registered reader recognizes the stream; fail loudly here
        // instead of hitting a NullPointerException below.
        throw new IllegalArgumentException("Source stream is not a readable image");
    }

    // get config params
    float xmax = (float) ConfigurationManager
            .getIntProperty("thumbnail.maxwidth");
    float ymax = (float) ConfigurationManager
            .getIntProperty("thumbnail.maxheight");

    // now get the image dimensions
    float xsize = (float) buf.getWidth(null);
    float ysize = (float) buf.getHeight(null);

    // if verbose flag is set, print out dimensions to STDOUT
    if (MediaFilterManager.isVerbose)
    {
        System.out.println("original size: " + xsize + "," + ysize);
    }

    // scale by x first if needed
    if (xsize > xmax)
    {
        // calculate scaling factor so that xsize * scale = new size (max)
        float scale_factor = xmax / xsize;

        // if verbose flag is set, print the scale factor to STDOUT
        if (MediaFilterManager.isVerbose)
        {
            System.out.println("x scale factor: " + scale_factor);
        }

        // reduce x and y by the same factor to preserve the aspect ratio
        xsize = xsize * scale_factor;
        ysize = ysize * scale_factor;

        if (MediaFilterManager.isVerbose)
        {
            System.out.println("new size: " + xsize + "," + ysize);
        }
    }

    // scale by y if still needed
    if (ysize > ymax)
    {
        float scale_factor = ymax / ysize;
        xsize = xsize * scale_factor;
        ysize = ysize * scale_factor;
    }

    // if verbose flag is set, print details to STDOUT
    if (MediaFilterManager.isVerbose)
    {
        System.out.println("created thumbnail size: " + xsize + ", "
                + ysize);
    }

    // never let a dimension truncate to zero (e.g. a 1px-wide source
    // scaled down), or the BufferedImage constructor throws
    // IllegalArgumentException.
    int xpix = Math.max(1, (int) xsize);
    int ypix = Math.max(1, (int) ysize);

    // create an image buffer for the thumbnail with the new dimensions
    BufferedImage thumbnail = new BufferedImage(xpix, ypix,
            BufferedImage.TYPE_INT_RGB);

    // now render the image into the thumbnail buffer, releasing the
    // graphics context when done
    Graphics2D g2d = thumbnail.createGraphics();
    g2d.drawImage(buf, 0, 0, xpix, ypix, null);
    g2d.dispose();

    // write the thumbnail to an in-memory JPEG and return a stream over it
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    ImageIO.write(thumbnail, "jpeg", baos);
    return new ByteArrayInputStream(baos.toByteArray());
}
public String[] getInputMIMETypes()
{
    // Accept every MIME type for which an ImageIO reader plugin is
    // registered in this JVM.
    String[] readerMimeTypes = ImageIO.getReaderMIMETypes();
    return readerMimeTypes;
}
public String[] getInputDescriptions()
{
    // No human-readable descriptions are provided for the supported input
    // types; callers must treat null as "none available".
    return null;
}
public String[] getInputExtensions()
{
    // Temporarily disabled as JDK 1.6 only
    // return ImageIO.getReaderFileSuffixes();
    // NOTE(review): ImageIO.getReaderFileSuffixes() has existed since
    // Java 6; if the build now targets Java 6+ this could be re-enabled,
    // but callers currently depend on the null return -- confirm first.
    return null;
}
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.mediafilter;
import java.awt.Graphics2D;
import java.awt.image.BufferedImage;
import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.util.Arrays;
import java.util.regex.MatchResult;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.imageio.ImageIO;
import org.apache.log4j.Logger;
import org.dspace.core.ConfigurationManager;
import org.dspace.core.Utils;
/**
* Thumbnail MediaFilter for PDF sources
*
* This filter generates thumbnail images for PDF documents, _including_
* 3D PDF documents with 2D "poster" images. Since the PDFBox library
* does not understand these, and fails to render a lot of other PDFs,
* this filter forks a process running the "pdftoppm" program from the
* XPdf suite -- see http://www.foolabs.com/xpdf/
* This is a suite of open-source PDF tools that has been widely ported
* to Unix platforms and the ones we use (pdftoppm, pdfinfo) even
* run on Win32.
*
* This was written for the FACADE project but it is not directly connected
* to any of the other FACADE-specific software. The FACADE UI expects
* to find thumbnail images for 3D PDFs generated by this filter.
*
* Requires DSpace config properties keys:
*
* xpdf.path.pdftoppm -- absolute path to "pdftoppm" executable (required!)
* xpdf.path.pdfinfo -- absolute path to "pdfinfo" executable (required!)
* thumbnail.maxwidth -- borrowed from thumbnails, max dim of generated image
*
* @author Larry Stone
* @see org.dspace.app.mediafilter.MediaFilter
*/
public class XPDF2Thumbnail extends MediaFilter
{
    private static Logger log = Logger.getLogger(XPDF2Thumbnail.class);

    // maximum size of either preview image dimension
    private static final int MAX_PX = 800;

    // maximum DPI to request from pdftoppm - use common screen res, 100dpi.
    private static final int MAX_DPI = 100;

    // command to get image from PDF; @COMMAND@, @DPI@, @FILE@ and
    // @OUTPUTFILE@ are placeholders substituted in getDestinationStream().
    private static final String XPDF_PDFTOPPM_COMMAND[] =
    {
        "@COMMAND@", "-q", "-f", "1", "-l", "1",
        "-r", "@DPI@", "@FILE@", "@OUTPUTFILE@"
    };

    // command to get the page-1 MediaBox from a PDF; @COMMAND@ and @FILE@
    // are placeholders substituted at runtime.
    private static final String XPDF_PDFINFO_COMMAND[] =
    {
        "@COMMAND@", "-f", "1", "-l", "1", "-box", "@FILE@"
    };

    // executable path for "pdftoppm", comes from DSpace config at runtime.
    private String pdftoppmPath = null;

    // executable path for "pdfinfo", comes from DSpace config at runtime.
    private String pdfinfoPath = null;

    // match line in pdfinfo output that describes file's MediaBox, e.g.
    // "Page    1 MediaBox:     0.00     0.00   612.00   792.00"
    // -- groups 1..4 capture x0, y0, x1, y1 in points (1/72 inch).
    private static final Pattern MEDIABOX_PATT = Pattern.compile(
        "^Page\\s+\\d+\\s+MediaBox:\\s+([\\.\\d-]+)\\s+([\\.\\d-]+)\\s+([\\.\\d-]+)\\s+([\\.\\d-]+)");

    // also from thumbnail.maxwidth in config
    private int maxwidth = 0;

    // backup default for size, on the large side.
    private static final int DEFAULT_MAXWIDTH = 500;

    // Thumbnails keep the source name and gain a .jpg suffix.
    public String getFilteredName(String oldFilename)
    {
        return oldFilename + ".jpg";
    }

    // Generated bitstreams go into the standard THUMBNAIL bundle.
    public String getBundleName()
    {
        return "THUMBNAIL";
    }

    // Bitstream format name as registered in the bitstream format registry.
    public String getFormatString()
    {
        return "JPEG";
    }

    // Description string recorded on the generated bitstream.
    public String getDescription()
    {
        return "Generated Thumbnail";
    }

    // canonical MediaFilter method to generate the thumbnail as stream.
    // Overall plan: copy the PDF to a temp file (the xpdf tools need random
    // access to a file), ask pdfinfo for the page-1 MediaBox to choose a
    // rendering DPI, render page 1 with pdftoppm to a PPM file, read it
    // back via ImageIO, scale it, and return it as an in-memory JPEG.
    public InputStream getDestinationStream(InputStream sourceStream)
        throws Exception
    {
        // sanity check: xpdf paths are required. can cache since it won't change
        if (pdftoppmPath == null || pdfinfoPath == null)
        {
            pdftoppmPath = ConfigurationManager.getProperty("xpdf.path.pdftoppm");
            pdfinfoPath = ConfigurationManager.getProperty("xpdf.path.pdfinfo");
            if (pdftoppmPath == null)
            {
                throw new IllegalStateException("No value for key \"xpdf.path.pdftoppm\" in DSpace configuration! Should be path to XPDF pdftoppm executable.");
            }
            if (pdfinfoPath == null)
            {
                throw new IllegalStateException("No value for key \"xpdf.path.pdfinfo\" in DSpace configuration! Should be path to XPDF pdfinfo executable.");
            }
            // 0 means the property is unset; fall back to a sane default.
            maxwidth = ConfigurationManager.getIntProperty("thumbnail.maxwidth");
            if (maxwidth == 0)
            {
                maxwidth = DEFAULT_MAXWIDTH;
            }
        }

        // make local file copy of source PDF since the PDF tools
        // require a file for random access.
        // XXX fixme would be nice to optimize this if we ever get
        // XXX a DSpace method to access (optionally!) the _file_ of
        // a Bitstream in the asset store, only when there is one of course.
        File sourceTmp = File.createTempFile("DSfilt",".pdf");
        sourceTmp.deleteOnExit();
        int status = 0;
        BufferedImage source = null;
        try
        {
            OutputStream sto = new FileOutputStream(sourceTmp);
            Utils.copy(sourceStream, sto);
            sto.close();
            sourceStream.close();

            // First get max physical dim of bounding box of first page
            // to compute the DPI to ask for.. otherwise some AutoCAD
            // drawings can produce enormous files even at 75dpi, for
            // 48" drawings..
            // run pdfinfo, look for MediaBox description in the output, e.g.
            // "Page 1 MediaBox: 0.00 0.00 612.00 792.00"
            //
            int dpi = 0;
            String pdfinfoCmd[] = XPDF_PDFINFO_COMMAND.clone();
            pdfinfoCmd[0] = pdfinfoPath;
            pdfinfoCmd[pdfinfoCmd.length-1] = sourceTmp.toString();
            BufferedReader lr = null;
            try
            {
                MatchResult mediaBox = null;
                // NOTE(review): the child's stderr is never drained here or
                // below; a very chatty pdfinfo/pdftoppm could in principle
                // block on a full pipe buffer -- confirm with the tools used.
                Process pdfProc = Runtime.getRuntime().exec(pdfinfoCmd);
                lr = new BufferedReader(new InputStreamReader(pdfProc.getInputStream()));
                String line;
                // Scan all output lines; if several MediaBox lines match,
                // the LAST one wins (we only asked for page 1, via -f/-l).
                for (line = lr.readLine(); line != null; line = lr.readLine())
                {
                    Matcher mm = MEDIABOX_PATT.matcher(line);
                    if (mm.matches())
                    {
                        mediaBox = mm.toMatchResult();
                    }
                }
                // a nonzero exit is only logged, not fatal, so long as a
                // MediaBox line was still parsed from the output.
                int istatus = pdfProc.waitFor();
                if (istatus != 0)
                {
                    log.error("XPDF pdfinfo proc failed, exit status=" + istatus + ", file=" + sourceTmp);
                }
                if (mediaBox == null)
                {
                    log.error("Sanity check: Did not find \"MediaBox\" line in output of XPDF pdfinfo, file="+sourceTmp);
                    throw new IllegalArgumentException("Failed to get MediaBox of PDF with pdfinfo, cannot compute thumbnail.");
                }
                else
                {
                    // MediaBox corners in points; pick the larger edge.
                    double x0 = Double.parseDouble(mediaBox.group(1));
                    double y0 = Double.parseDouble(mediaBox.group(2));
                    double x1 = Double.parseDouble(mediaBox.group(3));
                    double y1 = Double.parseDouble(mediaBox.group(4));
                    int maxdim = (int)Math.max(Math.abs(x1 - x0), Math.abs(y1 - y0));
                    // Choose a DPI so the rendered page is at most MAX_PX
                    // on its longer side, capped at MAX_DPI.
                    // NOTE(review): a degenerate MediaBox (maxdim == 0)
                    // would throw ArithmeticException here -- verify inputs.
                    dpi = Math.min(MAX_DPI, (MAX_PX * 72 / maxdim));
                    log.debug("DPI: pdfinfo method got dpi="+dpi+" for max dim="+maxdim+" (points, 1/72\")");
                }
            }
            catch (InterruptedException e)
            {
                log.error("Failed transforming file for preview: ",e);
                throw new IllegalArgumentException("Failed transforming file for thumbnail: ",e);
            }
            catch (NumberFormatException e)
            {
                log.error("Failed interpreting pdfinfo results, check regexp: ",e);
                throw new IllegalArgumentException("Failed transforming file for thumbnail: ",e);
            }
            finally
            {
                if (lr != null)
                {
                    lr.close();
                }
            }

            // Render page 1 using xpdf's pdftoppm
            // Requires Sun JAI imageio additions to read ppm directly.
            // this will get "-000001.ppm" appended to it by pdftoppm
            File outPrefixF = File.createTempFile("prevu","out");
            String outPrefix = outPrefixF.toString();
            // delete the empty temp file itself: pdftoppm only needs the
            // path as an output name prefix.
            if (!outPrefixF.delete())
            {
                log.error("Unable to delete output file");
            }
            // fill in command placeholders: @DPI@, @FILE@, @OUTPUTFILE@
            // are the last three elements of XPDF_PDFTOPPM_COMMAND.
            String pdfCmd[] = XPDF_PDFTOPPM_COMMAND.clone();
            pdfCmd[0] = pdftoppmPath;
            pdfCmd[pdfCmd.length-3] = String.valueOf(dpi);
            pdfCmd[pdfCmd.length-2] = sourceTmp.toString();
            pdfCmd[pdfCmd.length-1] = outPrefix;
            File outf = new File(outPrefix+"-000001.ppm");
            log.debug("Running xpdf command: "+Arrays.deepToString(pdfCmd));
            try
            {
                Process pdfProc = Runtime.getRuntime().exec(pdfCmd);
                status = pdfProc.waitFor();
                log.debug("PDFTOPPM output is: "+outf+", exists="+outf.exists());
                // ImageIO.read returns null if the PPM cannot be decoded;
                // that case is caught by the source == null check below.
                source = ImageIO.read(outf);
            }
            catch (InterruptedException e)
            {
                log.error("Failed transforming file for preview: ",e);
                throw new IllegalArgumentException("Failed transforming file for preview: ",e);
            }
            finally
            {
                if (!outf.delete())
                {
                    log.error("Unable to delete file");
                }
            }
        }
        finally
        {
            // always clean up the temp copy of the source PDF
            if (!sourceTmp.delete())
            {
                log.error("Unable to delete temporary source");
            }
            if (status != 0)
            {
                log.error("PDF conversion proc failed, exit status=" + status + ", file=" + sourceTmp);
            }
        }

        if (source == null)
        {
            throw new IOException("Unknown failure while transforming file to preview: no image produced.");
        }

        // Scale image and return in-memory stream
        BufferedImage toenail = scaleImage(source, maxwidth*3/4, maxwidth);
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        ImageIO.write(toenail, "jpeg", baos);
        return new ByteArrayInputStream(baos.toByteArray());
    }

    // scale the image, preserving aspect ratio, if at least one
    // dimension is not between min and max.
    // Called above with min = maxwidth*3/4 and max = maxwidth.
    private static BufferedImage scaleImage(BufferedImage source,
                                            int min, int max)
    {
        int xsize = source.getWidth(null);
        int ysize = source.getHeight(null);
        int msize = Math.max(xsize, ysize);
        BufferedImage result = null;

        // scale the image if it's outside of requested range.
        // ALSO pass through if min and max are both 0
        // (i.e. return unchanged when the larger edge is at least min
        // and the smaller edge is at most max).
        if ((min == 0 && max == 0) ||
            (msize >= min && Math.min(xsize, ysize) <= max))
        {
            return source;
        }
        else
        {
            // scale so the LARGER edge becomes exactly max, preserving
            // the aspect ratio via integer arithmetic.
            int xnew = xsize * max / msize;
            int ynew = ysize * max / msize;
            result = new BufferedImage(xnew, ynew, BufferedImage.TYPE_INT_RGB);
            Graphics2D g2d = result.createGraphics();
            g2d.drawImage(source, 0, 0, xnew, ynew, null);
            return result;
        }
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.mediafilter;
import org.dspace.content.Bitstream;
import org.dspace.content.Item;
import org.dspace.core.Context;
/**
* Abstract class which defines the default settings for a *simple* Media or Format Filter.
* This class may be extended by any class which wishes to define a simple filter to be run
* by the MediaFilterManager. More complex filters should likely implement the FormatFilter
* interface directly, so that they can define their own pre/postProcessing methods.
*/
public abstract class MediaFilter implements FormatFilter
{
    /**
     * Perform any pre-processing of the source bitstream *before* the actual
     * filtering takes place in MediaFilterManager.processBitstream().
     * <p>
     * Return true if pre-processing is successful (or no pre-processing
     * is necessary). Return false if bitstream should be skipped
     * for any reason.
     *
     * @param c
     *            context
     * @param item
     *            item containing bitstream to process
     * @param source
     *            source bitstream to be processed
     *
     * @return true if bitstream processing should continue,
     *         false if this bitstream should be skipped
     */
    public boolean preProcessBitstream(Context c, Item item, Bitstream source)
        throws Exception
    {
        return true; //default to no pre-processing
    }

    /**
     * Perform any post-processing of the generated bitstream *after* this
     * filter has already been run.
     * <p>
     * This default implementation does nothing; subclasses may override it
     * to act on the newly created bitstream (for example, to set policies
     * or record provenance). Unlike preProcessBitstream, this hook is void
     * and cannot cause the bitstream to be skipped.
     *
     * @param c
     *            context
     * @param item
     *            item containing bitstream to process
     * @param generatedBitstream
     *            the bitstream which was generated by
     *            this filter.
     */
    public void postProcessBitstream(Context c, Item item, Bitstream generatedBitstream)
        throws Exception
    {
        //default to no post-processing necessary
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.mediafilter;
import java.io.InputStream;
import org.dspace.content.Bitstream;
import org.dspace.content.Item;
import org.dspace.core.Context;
/**
* Public interface for any class which transforms or converts content/bitstreams
* from one format to another. This interface should be implemented by any class
* which defines a "filter" to be run by the MediaFilterManager.
*/
public interface FormatFilter
{
    /**
     * Get a filename for a newly created filtered bitstream
     *
     * @param sourceName
     *            name of source bitstream
     * @return filename generated by the filter - for example, document.pdf
     *         becomes document.pdf.txt
     */
    public String getFilteredName(String sourceName);

    /**
     * @return name of the bundle this filter will put its generated
     *         Bitstreams into
     */
    public String getBundleName();

    /**
     * @return name of the bitstream format (say "HTML" or "Microsoft Word")
     *         returned by this filter; look in the bitstream format registry
     *         or mediafilter.cfg for valid format strings.
     */
    public String getFormatString();

    /**
     * @return string describing the newly-generated Bitstream - stating how
     *         it was produced is a good idea
     */
    public String getDescription();

    /**
     * Transform the source bitstream's content into this filter's output
     * format.
     *
     * @param source
     *            input stream
     *
     * @return result of filter's transformation, written out to a bitstream
     */
    public InputStream getDestinationStream(InputStream source)
        throws Exception;

    /**
     * Perform any pre-processing of the source bitstream *before* the actual
     * filtering takes place in MediaFilterManager.processBitstream().
     * <p>
     * Return true if pre-processing is successful (or no pre-processing
     * is necessary). Return false if bitstream should be skipped
     * for any reason.
     *
     * @param c
     *            context
     * @param item
     *            item containing bitstream to process
     * @param source
     *            source bitstream to be processed
     *
     * @return true if bitstream processing should continue,
     *         false if this bitstream should be skipped
     */
    public boolean preProcessBitstream(Context c, Item item, Bitstream source)
        throws Exception;

    /**
     * Perform any post-processing of the generated bitstream *after* this
     * filter has already been run.
     * <p>
     * Implementations may act on the newly created bitstream (for example,
     * to set policies or record provenance). Unlike preProcessBitstream,
     * this hook is void and cannot cause the bitstream to be skipped.
     *
     * @param c
     *            context
     * @param item
     *            item containing bitstream to process
     * @param generatedBitstream
     *            the bitstream which was generated by
     *            this filter.
     */
    public void postProcessBitstream(Context c, Item item, Bitstream generatedBitstream)
        throws Exception;
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.mediafilter;
import java.awt.Graphics2D;
import java.awt.image.BufferedImage;
import java.awt.Font;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import javax.imageio.ImageIO;
import org.dspace.core.ConfigurationManager;
/**
* Filter image bitstreams, scaling the image to be within the bounds of
* thumbnail.maxwidth, thumbnail.maxheight, the size we want our thumbnail to be
* no bigger than. Creates only JPEGs.
*/
public class BrandedPreviewJPEGFilter extends MediaFilter
{
    // Previews keep the source name and gain a .preview.jpg suffix.
    public String getFilteredName(String oldFilename)
    {
        return oldFilename + ".preview.jpg";
    }

    /**
     * @return String bundle name the generated previews are stored in
     */
    public String getBundleName()
    {
        return "BRANDED_PREVIEW";
    }

    /**
     * @return String bitstream format of the generated previews
     */
    public String getFormatString()
    {
        return "JPEG";
    }

    /**
     * @return String description recorded on the generated bitstream
     */
    public String getDescription()
    {
        return "Generated Branded Preview";
    }

    /**
     * Read the source image, scale it to fit within webui.preview.maxwidth
     * x webui.preview.maxheight (preserving aspect ratio), render a branding
     * strip of webui.preview.brand.height pixels below it, and return the
     * composite as an in-memory JPEG stream.
     *
     * @param source
     *            source input stream
     *
     * @return InputStream the resulting input stream
     */
    public InputStream getDestinationStream(InputStream source)
        throws Exception
    {
        // read in bitstream's image
        BufferedImage buf = ImageIO.read(source);
        if (buf == null)
        {
            // ImageIO.read returns null (rather than throwing) when no
            // registered reader recognizes the stream; fail loudly here
            // instead of hitting a NullPointerException below.
            throw new IllegalArgumentException("Source stream is not a readable image");
        }

        // get config params
        float xmax = (float) ConfigurationManager
                .getIntProperty("webui.preview.maxwidth");
        float ymax = (float) ConfigurationManager
                .getIntProperty("webui.preview.maxheight");
        int brandHeight = ConfigurationManager.getIntProperty("webui.preview.brand.height");
        String brandFont = ConfigurationManager.getProperty("webui.preview.brand.font");
        int brandFontPoint = ConfigurationManager.getIntProperty("webui.preview.brand.fontpoint");

        // now get the image dimensions
        float xsize = (float) buf.getWidth(null);
        float ysize = (float) buf.getHeight(null);

        // if verbose flag is set, print out dimensions to STDOUT
        if (MediaFilterManager.isVerbose)
        {
            System.out.println("original size: " + xsize + "," + ysize);
        }

        // scale by x first if needed
        if (xsize > xmax)
        {
            // calculate scaling factor so that xsize * scale = new size (max)
            float scaleFactor = xmax / xsize;

            // if verbose flag is set, print the scale factor to STDOUT
            if (MediaFilterManager.isVerbose)
            {
                System.out.println("x scale factor: " + scaleFactor);
            }

            // reduce x and y by the same factor to preserve the aspect ratio
            xsize = xsize * scaleFactor;
            ysize = ysize * scaleFactor;

            if (MediaFilterManager.isVerbose)
            {
                System.out.println("new size: " + xsize + "," + ysize);
            }
        }

        // scale by y if still needed
        if (ysize > ymax)
        {
            float scaleFactor = ymax / ysize;
            xsize = xsize * scaleFactor;
            ysize = ysize * scaleFactor;
        }

        // if verbose flag is set, print details to STDOUT
        if (MediaFilterManager.isVerbose)
        {
            System.out.println("created thumbnail size: " + xsize + ", "
                    + ysize);
        }

        // create an image buffer for the preview with the new xsize, ysize,
        // plus room for the branding strip below the image
        BufferedImage branded = new BufferedImage((int) xsize, (int) ysize + brandHeight,
                BufferedImage.TYPE_INT_RGB);

        // render the scaled image, then the brand strip underneath it
        Graphics2D g2d = branded.createGraphics();
        g2d.drawImage(buf, 0, 0, (int) xsize, (int) ysize, null);

        Brand brand = new Brand((int) xsize, brandHeight, new Font(brandFont, Font.PLAIN, brandFontPoint), 5);
        BufferedImage brandImage = brand.create(ConfigurationManager.getProperty("webui.preview.brand"),
                ConfigurationManager.getProperty("webui.preview.brand.abbrev"),
                MediaFilterManager.getCurrentItem() == null ? "" : "hdl:" + MediaFilterManager.getCurrentItem().getHandle());

        // BUG FIX: draw the brand at its configured height. The previous
        // code hard-coded a height of 20 pixels even though the brand image
        // was created brandHeight tall, distorting it whenever
        // webui.preview.brand.height != 20.
        g2d.drawImage(brandImage, 0, (int) ysize, (int) xsize, brandHeight, null);
        g2d.dispose();

        // write the composite to an in-memory JPEG and return a stream over it
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        ImageIO.write(branded, "jpeg", baos);
        return new ByteArrayInputStream(baos.toByteArray());
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.mediafilter;
import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Arrays;
import org.apache.log4j.Logger;
import org.dspace.core.ConfigurationManager;
import org.dspace.core.Utils;
/**
* Text MediaFilter for PDF sources
*
* This filter produces extracted text suitable for building an index,
* but not for display to end users.
* It forks a process running the "pdftotext" program from the
* XPdf suite -- see http://www.foolabs.com/xpdf/
* This is a suite of open-source PDF tools that has been widely ported
* to Unix platforms and the ones we use (pdftoppm, pdftotext) even
* run on Win32.
*
* This was written for the FACADE project but it is not directly connected
* to any of the other FACADE-specific software. The FACADE UI expects
* to find thumbnail images for 3D PDFs generated by this filter.
*
* Requires DSpace config properties keys:
*
* xpdf.path.pdftotext -- path to "pdftotext" executable (required!)
*
* @author Larry Stone
* @see org.dspace.app.mediafilter.MediaFilter
*/
public class XPDF2Text extends MediaFilter
{
    private static Logger log = Logger.getLogger(XPDF2Text.class);

    // Command to get text from pdf; @COMMAND@ and @infile@ are placeholders
    // substituted in getDestinationStream(). The trailing "-" sends the
    // extracted text to the child's stdout.
    private static final String XPDF_PDFTOTEXT_COMMAND[] =
    {
        "@COMMAND@", "-q", "-enc", "UTF-8", "@infile@", "-"
    };

    // executable path that comes from DSpace config at runtime.
    private String pdftotextPath = null;

    // Extracted-text files keep the source name and gain a .txt suffix.
    public String getFilteredName(String oldFilename)
    {
        return oldFilename + ".txt";
    }

    // Generated bitstreams go into the standard TEXT bundle.
    public String getBundleName()
    {
        return "TEXT";
    }

    // Bitstream format name as registered in the bitstream format registry.
    public String getFormatString()
    {
        return "Text";
    }

    // Description string recorded on the generated bitstream.
    public String getDescription()
    {
        return "Extracted Text";
    }

    // Copy the source PDF to a temp file (pdftotext needs random access),
    // run pdftotext on it, capture its stdout as the extracted text, and
    // map known nonzero exit codes to descriptive errors.
    public InputStream getDestinationStream(InputStream sourceStream)
        throws Exception
    {
        // get configured value for path to XPDF command:
        if (pdftotextPath == null)
        {
            pdftotextPath = ConfigurationManager.getProperty("xpdf.path.pdftotext");
            if (pdftotextPath == null)
            {
                throw new IllegalStateException("No value for key \"xpdf.path.pdftotext\" in DSpace configuration! Should be path to XPDF pdftotext executable.");
            }
        }

        File sourceTmp = File.createTempFile("DSfilt",".pdf");
        sourceTmp.deleteOnExit(); // extra insurance, we'll delete it here.
        // -1 marks "process never completed"; set by waitFor() on success.
        int status = -1;
        try
        {
            // make local temp copy of source PDF since PDF tools
            // require a file for random access.
            // XXX fixme could optimize if we ever get an interface to grab asset *files*
            OutputStream sto = new FileOutputStream(sourceTmp);
            Utils.copy(sourceStream, sto);
            sto.close();
            sourceStream.close();

            // fill in the @COMMAND@ (index 0) and @infile@ (index 4)
            // placeholders of XPDF_PDFTOTEXT_COMMAND.
            String pdfCmd[] = XPDF_PDFTOTEXT_COMMAND.clone();
            pdfCmd[0] = pdftotextPath;
            pdfCmd[4] = sourceTmp.toString();
            log.debug("Running command: "+Arrays.deepToString(pdfCmd));
            // NOTE(review): the child's stderr is never drained; a chatty
            // pdftotext could in principle block on a full pipe buffer --
            // confirm with the tool in use. stdout IS fully consumed before
            // waitFor(), so large text output cannot deadlock.
            Process pdfProc = Runtime.getRuntime().exec(pdfCmd);
            InputStream stdout = pdfProc.getInputStream();
            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            Utils.copy(new BufferedInputStream(stdout), baos);
            stdout.close();
            baos.close();
            status = pdfProc.waitFor();

            // Translate the documented pdftotext exit codes into messages
            // (1 = error opening PDF, 3 = PDF permissions error).
            String msg = null;
            if (status == 1)
            {
                msg = "pdftotext failed opening input: file=" + sourceTmp.toString();
            }
            else if (status == 3)
            {
                msg = "pdftotext permission failure (perhaps copying of text from this document is not allowed - check PDF file's internal permissions): file=" + sourceTmp.toString();
            }
            else if (status != 0)
            {
                msg = "pdftotext failed, maybe corrupt PDF? status=" + String.valueOf(status);
            }
            if (msg != null)
            {
                log.error(msg);
                throw new IOException(msg);
            }
            return new ByteArrayInputStream(baos.toByteArray());
        }
        catch (InterruptedException e)
        {
            log.error("Failed in pdftotext subprocess: ",e);
            throw e;
        }
        finally
        {
            // always clean up the temp copy of the source PDF
            if (!sourceTmp.delete())
            {
                log.error("Unable to delete temporary file");
            }
            // also fires when an exception left status at -1 above
            if (status != 0)
            {
                log.error("PDF conversion proc failed, returns=" + status + ", file=" + sourceTmp);
            }
        }
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.mediafilter;
/**
* Identifier class, holding a single item of text and its location
* within a rectangular areas. Allowable locations are any of the four corners.
* This is a copy of Picture Australia's PiObj class re-organised with methods.
* Thanks to Ninh Nguyen at the National Library for providing the original source.
*/
class BrandText
{
    /** Bottom Left */
    public static final String BL = "bl";

    /** Bottom Right */
    public static final String BR = "br";

    /** Top Left */
    public static final String TL = "tl";

    /** Top Right */
    public static final String TR = "tr";

    // one of the four corner constants above
    private String location;

    // the text to render at that corner
    private String text;

    /**
     * Constructor for an Identifier object containing a text string and
     * its location within a rectangular area.
     *
     * @param location one of the class location constants e.g. <code>Identifier.BL</code>
     * @param text the text associated with the location
     */
    public BrandText(String location, String text)
    {
        this.location = location;
        this.text = text;
    }

    /**
     * get the location the text of the Identifier object is associated with
     *
     * @return String one of the class location constants e.g. <code>Identifier.BL</code>
     */
    public String getLocation()
    {
        return location;
    }

    /**
     * get the text associated with the Identifier object
     *
     * @return String the text associated with the Identifier object
     */
    public String getText()
    {
        return text;
    }

    /**
     * set the location associated with the Identifier object
     *
     * @param location one of the class location constants
     */
    public void setLocation(String location)
    {
        this.location = location;
    }

    /**
     * set the text associated with the Identifier object
     *
     * @param text any text string (typically a branding or identifier)
     */
    public void setText(String text)
    {
        this.text = text;
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.mediafilter;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.List;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.MissingArgumentException;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.PosixParser;
import org.apache.commons.lang.ArrayUtils;
import org.dspace.authorize.AuthorizeManager;
import org.dspace.content.Bitstream;
import org.dspace.content.BitstreamFormat;
import org.dspace.content.Bundle;
import org.dspace.content.Collection;
import org.dspace.content.Community;
import org.dspace.content.DCDate;
import org.dspace.content.DSpaceObject;
import org.dspace.content.Item;
import org.dspace.content.ItemIterator;
import org.dspace.core.ConfigurationManager;
import org.dspace.core.Constants;
import org.dspace.core.Context;
import org.dspace.core.PluginManager;
import org.dspace.core.SelfNamedPlugin;
import org.dspace.handle.HandleManager;
import org.dspace.search.DSIndexer;
/**
* MediaFilterManager is the class that invokes the media/format filters over the
* repository's content. a few command line flags affect the operation of the
* MFM: -v verbose outputs all extracted text to STDOUT; -f force forces all
* bitstreams to be processed, even if they have been before; -n noindex does not
* recreate index after processing bitstreams; -i [identifier] limits processing
* scope to a community, collection or item; and -m [max] limits processing to a
* maximum number of items.
*/
public class MediaFilterManager
{
//key (in dspace.cfg) which lists all enabled filters by name
public static final String MEDIA_FILTER_PLUGINS_KEY = "filter.plugins";
//prefix (in dspace.cfg) for all filter properties
public static final String FILTER_PREFIX = "filter";
//suffix (in dspace.cfg) for input formats supported by each filter
public static final String INPUT_FORMATS_SUFFIX = "inputFormats";
static boolean updateIndex = true; // default to updating index
static boolean isVerbose = false; // default to not verbose
static boolean isQuiet = false; // default is noisy
static boolean isForce = false; // default to not forced
static String identifier = null; // object scope limiter
static int max2Process = Integer.MAX_VALUE; // maximum number items to process
static int processed = 0; // number items processed
private static Item currentItem = null; // current item being processed
private static FormatFilter[] filterClasses = null;
private static Map<String, List<String>> filterFormats = new HashMap<String, List<String>>();
private static List<String> skipList = null; //list of identifiers to skip during processing
//separator in filterFormats Map between a filter class name and a plugin name,
//for MediaFilters which extend SelfNamedPlugin (\034 is "file separator" char)
public static final String FILTER_PLUGIN_SEPARATOR = "\034";
public static void main(String[] argv) throws Exception
{
// set headless for non-gui workstations
System.setProperty("java.awt.headless", "true");
// create an options object and populate it
CommandLineParser parser = new PosixParser();
int status = 0;
Options options = new Options();
options.addOption("v", "verbose", false,
"print all extracted text and other details to STDOUT");
options.addOption("q", "quiet", false,
"do not print anything except in the event of errors.");
options.addOption("f", "force", false,
"force all bitstreams to be processed");
options.addOption("n", "noindex", false,
"do NOT update the search index after filtering bitstreams");
options.addOption("i", "identifier", true,
"ONLY process bitstreams belonging to identifier");
options.addOption("m", "maximum", true,
"process no more than maximum items");
options.addOption("h", "help", false, "help");
//create a "plugin" option (to specify specific MediaFilter plugins to run)
OptionBuilder.withLongOpt("plugins");
OptionBuilder.withValueSeparator(',');
OptionBuilder.withDescription(
"ONLY run the specified Media Filter plugin(s)\n" +
"listed from '" + MEDIA_FILTER_PLUGINS_KEY + "' in dspace.cfg.\n" +
"Separate multiple with a comma (,)\n" +
"(e.g. MediaFilterManager -p \n\"Word Text Extractor\",\"PDF Text Extractor\")");
Option pluginOption = OptionBuilder.create('p');
pluginOption.setArgs(Option.UNLIMITED_VALUES); //unlimited number of args
options.addOption(pluginOption);
//create a "skip" option (to specify communities/collections/items to skip)
OptionBuilder.withLongOpt("skip");
OptionBuilder.withValueSeparator(',');
OptionBuilder.withDescription(
"SKIP the bitstreams belonging to identifier\n" +
"Separate multiple identifiers with a comma (,)\n" +
"(e.g. MediaFilterManager -s \n 123456789/34,123456789/323)");
Option skipOption = OptionBuilder.create('s');
skipOption.setArgs(Option.UNLIMITED_VALUES); //unlimited number of args
options.addOption(skipOption);
CommandLine line = null;
try
{
line = parser.parse(options, argv);
}
catch(MissingArgumentException e)
{
System.out.println("ERROR: " + e.getMessage());
HelpFormatter myhelp = new HelpFormatter();
myhelp.printHelp("MediaFilterManager\n", options);
System.exit(1);
}
if (line.hasOption('h'))
{
HelpFormatter myhelp = new HelpFormatter();
myhelp.printHelp("MediaFilterManager\n", options);
System.exit(0);
}
if (line.hasOption('v'))
{
isVerbose = true;
}
isQuiet = line.hasOption('q');
if (line.hasOption('n'))
{
updateIndex = false;
}
if (line.hasOption('f'))
{
isForce = true;
}
if (line.hasOption('i'))
{
identifier = line.getOptionValue('i');
}
if (line.hasOption('m'))
{
max2Process = Integer.parseInt(line.getOptionValue('m'));
if (max2Process <= 1)
{
System.out.println("Invalid maximum value '" +
line.getOptionValue('m') + "' - ignoring");
max2Process = Integer.MAX_VALUE;
}
}
String filterNames[] = null;
if(line.hasOption('p'))
{
//specified which media filter plugins we are using
filterNames = line.getOptionValues('p');
if(filterNames==null || filterNames.length==0)
{ //display error, since no plugins specified
System.err.println("\nERROR: -p (-plugin) option requires at least one plugin to be specified.\n" +
"(e.g. MediaFilterManager -p \"Word Text Extractor\",\"PDF Text Extractor\")\n");
HelpFormatter myhelp = new HelpFormatter();
myhelp.printHelp("MediaFilterManager\n", options);
System.exit(1);
}
}
else
{
//retrieve list of all enabled media filter plugins!
String enabledPlugins = ConfigurationManager.getProperty(MEDIA_FILTER_PLUGINS_KEY);
filterNames = enabledPlugins.split(",\\s*");
}
//initialize an array of our enabled filters
List<FormatFilter> filterList = new ArrayList<FormatFilter>();
//set up each filter
for(int i=0; i< filterNames.length; i++)
{
//get filter of this name & add to list of filters
FormatFilter filter = (FormatFilter) PluginManager.getNamedPlugin(FormatFilter.class, filterNames[i]);
if(filter==null)
{
System.err.println("\nERROR: Unknown MediaFilter specified (either from command-line or in dspace.cfg): '" + filterNames[i] + "'");
System.exit(1);
}
else
{
filterList.add(filter);
String filterClassName = filter.getClass().getName();
String pluginName = null;
//If this filter is a SelfNamedPlugin,
//then the input formats it accepts may differ for
//each "named" plugin that it defines.
//So, we have to look for every key that fits the
//following format: filter.<class-name>.<plugin-name>.inputFormats
if( SelfNamedPlugin.class.isAssignableFrom(filter.getClass()) )
{
//Get the plugin instance name for this class
pluginName = ((SelfNamedPlugin) filter).getPluginInstanceName();
}
//Retrieve our list of supported formats from dspace.cfg
//For SelfNamedPlugins, format of key is:
// filter.<class-name>.<plugin-name>.inputFormats
//For other MediaFilters, format of key is:
// filter.<class-name>.inputFormats
String formats = ConfigurationManager.getProperty(
FILTER_PREFIX + "." + filterClassName +
(pluginName!=null ? "." + pluginName : "") +
"." + INPUT_FORMATS_SUFFIX);
//add to internal map of filters to supported formats
if (formats != null)
{
//For SelfNamedPlugins, map key is:
// <class-name><separator><plugin-name>
//For other MediaFilters, map key is just:
// <class-name>
filterFormats.put(filterClassName +
(pluginName!=null ? FILTER_PLUGIN_SEPARATOR + pluginName : ""),
Arrays.asList(formats.split(",[\\s]*")));
}
}//end if filter!=null
}//end for
//If verbose, print out loaded mediafilter info
if(isVerbose)
{
System.out.println("The following MediaFilters are enabled: ");
Iterator<String> i = filterFormats.keySet().iterator();
while(i.hasNext())
{
String filterName = i.next();
System.out.println("Full Filter Name: " + filterName);
String pluginName = null;
if(filterName.contains(FILTER_PLUGIN_SEPARATOR))
{
String[] fields = filterName.split(FILTER_PLUGIN_SEPARATOR);
filterName=fields[0];
pluginName=fields[1];
}
System.out.println(filterName +
(pluginName!=null? " (Plugin: " + pluginName + ")": ""));
}
}
//store our filter list into an internal array
filterClasses = (FormatFilter[]) filterList.toArray(new FormatFilter[filterList.size()]);
//Retrieve list of identifiers to skip (if any)
String skipIds[] = null;
if(line.hasOption('s'))
{
//specified which identifiers to skip when processing
skipIds = line.getOptionValues('s');
if(skipIds==null || skipIds.length==0)
{ //display error, since no identifiers specified to skip
System.err.println("\nERROR: -s (-skip) option requires at least one identifier to SKIP.\n" +
"Make sure to separate multiple identifiers with a comma!\n" +
"(e.g. MediaFilterManager -s 123456789/34,123456789/323)\n");
HelpFormatter myhelp = new HelpFormatter();
myhelp.printHelp("MediaFilterManager\n", options);
System.exit(0);
}
//save to a global skip list
skipList = Arrays.asList(skipIds);
}
Context c = null;
try
{
c = new Context();
// have to be super-user to do the filtering
c.setIgnoreAuthorization(true);
// now apply the filters
if (identifier == null)
{
applyFiltersAllItems(c);
}
else // restrict application scope to identifier
{
DSpaceObject dso = HandleManager.resolveToObject(c, identifier);
if (dso == null)
{
throw new IllegalArgumentException("Cannot resolve "
+ identifier + " to a DSpace object");
}
switch (dso.getType())
{
case Constants.COMMUNITY:
applyFiltersCommunity(c, (Community)dso);
break;
case Constants.COLLECTION:
applyFiltersCollection(c, (Collection)dso);
break;
case Constants.ITEM:
applyFiltersItem(c, (Item)dso);
break;
}
}
// update search index?
if (updateIndex)
{
if (!isQuiet)
{
System.out.println("Updating search index:");
}
DSIndexer.setBatchProcessingMode(true);
try
{
DSIndexer.updateIndex(c);
}
finally
{
DSIndexer.setBatchProcessingMode(false);
}
}
c.complete();
c = null;
}
catch (Exception e)
{
status = 1;
}
finally
{
if (c != null)
{
c.abort();
}
}
System.exit(status);
}
/**
 * Apply the configured media filters to every item in the archive.
 * When a skip-list is present the community hierarchy is walked so that
 * skipped handles are honoured at every level; otherwise all items are
 * processed directly via a single iterator.
 *
 * @param c DSpace context
 * @throws Exception passed through from the per-item filtering
 */
public static void applyFiltersAllItems(Context c) throws Exception
{
    if (skipList == null)
    {
        // No skip-list: just find every item and process it, up to the
        // configured maximum.
        ItemIterator itemIter = Item.findAll(c);
        try
        {
            while (itemIter.hasNext() && processed < max2Process)
            {
                applyFiltersItem(c, itemIter.next());
            }
        }
        finally
        {
            if (itemIter != null)
            {
                itemIter.close();
            }
        }
    }
    else
    {
        // A skip-list exists, so filter community-by-community in order
        // to respect what is in the skip-list.
        for (Community topLevel : Community.findAllTop(c))
        {
            applyFiltersCommunity(c, topLevel);
        }
    }
}
/**
 * Apply the configured media filters to a community: recurse into its
 * sub-communities, then process each of its collections. Does nothing
 * when the community's handle is in the skip-list.
 *
 * @param c DSpace context
 * @param community community to process
 * @throws Exception passed through from the per-item filtering
 */
public static void applyFiltersCommunity(Context c, Community community)
        throws Exception
{
    // Honour the skip-list at the community level.
    if (inSkipList(community.getHandle()))
    {
        return;
    }

    for (Community sub : community.getSubcommunities())
    {
        applyFiltersCommunity(c, sub);
    }
    for (Collection collection : community.getCollections())
    {
        applyFiltersCollection(c, collection);
    }
}
/**
 * Apply the configured media filters to every item of a collection, up
 * to the configured maximum number of items. Does nothing when the
 * collection's handle is in the skip-list.
 *
 * @param c DSpace context
 * @param collection collection whose items are processed
 * @throws Exception passed through from the per-item filtering
 */
public static void applyFiltersCollection(Context c, Collection collection)
        throws Exception
{
    // Honour the skip-list at the collection level.
    if (inSkipList(collection.getHandle()))
    {
        return;
    }

    ItemIterator items = collection.getItems();
    try
    {
        while (items.hasNext() && processed < max2Process)
        {
            applyFiltersItem(c, items.next());
        }
    }
    finally
    {
        if (items != null)
        {
            items.close();
        }
    }
}
/**
 * Apply the configured media filters to a single item, committing the
 * context and bumping the processed count when any bitstream of the item
 * was actually filtered. Does nothing when the item's handle is in the
 * skip-list.
 *
 * @param c DSpace context
 * @param item item to filter
 * @throws Exception passed through from the filtering/commit
 */
public static void applyFiltersItem(Context c, Item item) throws Exception
{
    // Honour the skip-list at the item level.
    if (inSkipList(item.getHandle()))
    {
        return;
    }

    // Cache this item in MediaFilterManager so individual MediaFilters
    // can access it while they run.
    currentItem = item;

    boolean anyFiltered = filterItem(c, item);
    if (anyFiltered)
    {
        // Commit changes after each filtered item and count it.
        c.commit();
        ++processed;
    }

    // Clear the item from the context cache and our internal cache.
    item.decache();
    currentItem = null;
}
/**
 * Iterate through the item's bitstreams in the ORIGINAL bundle(s),
 * applying filters where possible.
 *
 * @param c DSpace context
 * @param myItem item whose ORIGINAL bitstreams are filtered
 * @return true if any bitstream was processed, false if none
 * @throws Exception passed through from bitstream filtering
 */
public static boolean filterItem(Context c, Item myItem) throws Exception
{
    boolean anyProcessed = false;

    // Every bitstream of every "ORIGINAL" bundle is a filtering candidate.
    for (Bundle bundle : myItem.getBundles("ORIGINAL"))
    {
        for (Bitstream bitstream : bundle.getBitstreams())
        {
            anyProcessed |= filterBitstream(c, myItem, bitstream);
        }
    }

    return anyProcessed;
}
/**
 * Attempt to filter a bitstream.
 *
 * An exception will be thrown if the media filter class cannot be
 * instantiated; exceptions raised while actually filtering are logged to
 * STDOUT and swallowed so the remaining filters and bitstreams still run.
 *
 * @param c DSpace context
 * @param myItem item owning the bitstream (updated when a new rendition is added)
 * @param myBitstream the bitstream to filter
 * @return true if the bitstream was processed by at least one filter,
 *         false if no applicable filter or already processed
 */
public static boolean filterBitstream(Context c, Item myItem,
        Bitstream myBitstream) throws Exception
{
    boolean filtered = false;

    // Iterate through filter classes. A single format may be actioned
    // by more than one filter.
    for (int i = 0; i < filterClasses.length; i++)
    {
        FormatFilter filter = filterClasses[i];

        // If this filter is a SelfNamedPlugin, its list of supported
        // formats differs per named plugin instance, so the plugin
        // instance name is part of the lookup key.
        String pluginName = null;
        if (SelfNamedPlugin.class.isAssignableFrom(filter.getClass()))
        {
            pluginName = ((SelfNamedPlugin) filter).getPluginInstanceName();
        }

        // Supported formats for this filter. Map key is
        // <class-name><separator><plugin-name> for SelfNamedPlugins and
        // just <class-name> for other MediaFilters.
        List<String> fmts = filterFormats.get(filter.getClass().getName() +
                (pluginName != null ? FILTER_PLUGIN_SEPARATOR + pluginName : ""));

        // BUG FIX: 'fmts' is null when no inputFormats were configured for
        // this filter (the setup loop only stores a map entry when the
        // config property exists). The previous code dereferenced it
        // unconditionally and threw a NullPointerException; such filters
        // now fall through to the self-registration check below.
        if (fmts != null && fmts.contains(myBitstream.getFormat().getShortDescription()))
        {
            try
            {
                // only update item if bitstream not skipped
                if (processBitstream(c, myItem, myBitstream, filter))
                {
                    myItem.update(); // Make sure new bitstream has a sequence
                                     // number
                    filtered = true;
                }
            }
            catch (Exception e)
            {
                // Print out helpful information to locate the errored
                // bitstream, then carry on with the remaining filters.
                String handle = myItem.getHandle();
                Bundle[] bundles = myBitstream.getBundles();
                long size = myBitstream.getSize();
                String checksum = myBitstream.getChecksum() + " ("+myBitstream.getChecksumAlgorithm()+")";
                int assetstore = myBitstream.getStoreNumber();
                System.out.println("ERROR filtering, skipping bitstream:\n");
                System.out.println("\tItem Handle: "+ handle);
                for (Bundle bundle : bundles)
                {
                    System.out.println("\tBundle Name: " + bundle.getName());
                }
                System.out.println("\tFile Size: " + size);
                System.out.println("\tChecksum: " + checksum);
                System.out.println("\tAsset Store: " + assetstore);
                System.out.println(e);
                e.printStackTrace();
            }
        }
        else if (filter instanceof SelfRegisterInputFormats)
        {
            // Filter implements self registration, so check to see if it
            // should be applied given the formats it claims to support.
            SelfRegisterInputFormats srif = (SelfRegisterInputFormats) filter;
            boolean applyFilter = false;

            // Check MIME type
            String[] mimeTypes = srif.getInputMIMETypes();
            if (mimeTypes != null)
            {
                for (String mimeType : mimeTypes)
                {
                    if (mimeType.equalsIgnoreCase(myBitstream.getFormat().getMIMEType()))
                    {
                        applyFilter = true;
                    }
                }
            }

            // Check description
            if (!applyFilter)
            {
                String[] descriptions = srif.getInputDescriptions();
                if (descriptions != null)
                {
                    for (String desc : descriptions)
                    {
                        if (desc.equalsIgnoreCase(myBitstream.getFormat().getShortDescription()))
                        {
                            applyFilter = true;
                        }
                    }
                }
            }

            // Check extensions
            if (!applyFilter)
            {
                String[] extensions = srif.getInputExtensions();
                if (extensions != null)
                {
                    for (String ext : extensions)
                    {
                        String[] formatExtensions = myBitstream.getFormat().getExtensions();
                        if (formatExtensions != null && ArrayUtils.contains(formatExtensions, ext))
                        {
                            applyFilter = true;
                        }
                    }
                }
            }

            // Filter claims to handle this type of file, so attempt to apply it
            if (applyFilter)
            {
                try
                {
                    // only update item if bitstream not skipped
                    if (processBitstream(c, myItem, myBitstream, filter))
                    {
                        myItem.update(); // Make sure new bitstream has a sequence
                                         // number
                        filtered = true;
                    }
                }
                catch (Exception e)
                {
                    System.out.println("ERROR filtering, skipping bitstream #"
                            + myBitstream.getID() + " " + e);
                    e.printStackTrace();
                }
            }
        }
    }
    return filtered;
}
/**
 * Utility method that calls the virtual methods of the supplied
 * FormatFilter. It scans the bitstreams in an item, decides if the
 * derived bitstream has already been created, and if not (or if
 * overWrite/isForce is set) invokes the filter and attaches the new
 * rendition to the item.
 *
 * @param c
 *            context
 * @param item
 *            item containing bitstream to process
 * @param source
 *            source bitstream to process
 * @param formatFilter
 *            FormatFilter to perform filtering
 *
 * @return true if new rendition is created, false if rendition already
 *         exists and overWrite is not set
 */
public static boolean processBitstream(Context c, Item item, Bitstream source, FormatFilter formatFilter)
    throws Exception
{
    // do pre-processing of this bitstream, and if it fails, skip this bitstream!
    if(!formatFilter.preProcessBitstream(c, item, source))
    {
        return false;
    }

    // isForce (-f flag) means existing renditions are regenerated
    boolean overWrite = MediaFilterManager.isForce;

    // get bitstream filename, calculate destination filename
    String newName = formatFilter.getFilteredName(source.getName());

    Bitstream existingBitstream = null; // is there an existing rendition?
    Bundle targetBundle = null; // bundle we're modifying

    // all bundles with the filter's target name (e.g. "TEXT")
    Bundle[] bundles = item.getBundles(formatFilter.getBundleName());

    // check if destination bitstream exists
    if (bundles.length > 0)
    {
        // only finds the last match (FIXME?)
        for (int i = 0; i < bundles.length; i++)
        {
            Bitstream[] bitstreams = bundles[i].getBitstreams();

            for (int j = 0; j < bitstreams.length; j++)
            {
                if (bitstreams[j].getName().equals(newName))
                {
                    targetBundle = bundles[i];
                    existingBitstream = bitstreams[j];
                }
            }
        }
    }

    // if exists and overwrite = false, exit
    if (!overWrite && (existingBitstream != null))
    {
        if (!isQuiet)
        {
            System.out.println("SKIPPED: bitstream " + source.getID()
                + " (item: " + item.getHandle() + ") because '" + newName + "' already exists");
        }

        return false;
    }

    // run the actual filter; a null result means the filter declined or
    // failed gracefully, so nothing is stored
    InputStream destStream = formatFilter.getDestinationStream(source.retrieve());
    if (destStream == null)
    {
        if (!isQuiet)
        {
            System.out.println("SKIPPED: bitstream " + source.getID()
                + " (item: " + item.getHandle() + ") because filtering was unsuccessful");
        }

        return false;
    }

    // create new bundle if needed
    if (bundles.length < 1)
    {
        targetBundle = item.createBundle(formatFilter.getBundleName());
    }
    else
    {
        // take the first match
        targetBundle = bundles[0];
    }

    Bitstream b = targetBundle.createBitstream(destStream);

    // Now set the format and name of the bitstream
    b.setName(newName);
    b.setSource("Written by FormatFilter " + formatFilter.getClass().getName() +
                " on " + DCDate.getCurrent() + " (GMT).");
    b.setDescription(formatFilter.getDescription());

    // Find the proper format
    BitstreamFormat bf = BitstreamFormat.findByShortDescription(c,
            formatFilter.getFormatString());
    b.setFormat(bf);
    b.update();

    // Inherit policies from the source bitstream
    // (first remove any existing policies)
    AuthorizeManager.removeAllPolicies(c, b);
    AuthorizeManager.inheritPolicies(c, source, b);

    // fixme - set date?
    // we are overwriting, so remove old bitstream
    if (existingBitstream != null)
    {
        targetBundle.removeBitstream(existingBitstream);
    }

    if (!isQuiet)
    {
        System.out.println("FILTERED: bitstream " + source.getID()
            + " (item: " + item.getHandle() + ") and created '" + newName + "'");
    }

    // do post-processing of the generated bitstream
    formatFilter.postProcessBitstream(c, item, b);

    return true;
}
/**
 * Return the item that is currently being processed/filtered by the
 * MediaFilterManager.
 * <p>
 * Exposed so that FormatFilters can consult item-level information when
 * performing their format transformations/conversions.
 *
 * @return current Item being processed, or null when none is in progress
 */
public static Item getCurrentItem()
{
    return currentItem;
}
/**
 * Check whether processing of the given identifier should be skipped.
 *
 * @param identifier
 *            identifier (handle) of a community, collection or item
 *
 * @return true if this community, collection or item should be skipped
 *         during processing; false otherwise
 */
public static boolean inSkipList(String identifier)
{
    // No skip-list configured means nothing is ever skipped.
    boolean skip = (skipList != null) && skipList.contains(identifier);

    if (skip && !isQuiet)
    {
        System.out.println("SKIP-LIST: skipped bitstreams within identifier " + identifier);
    }

    return skip;
}
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.mediafilter;
import java.awt.image.BufferedImage;
import java.awt.Color;
import java.awt.Font;
import java.awt.FontMetrics;
import java.awt.Graphics2D;
import java.awt.Rectangle;
/**
 * Class to attach a footer ("brand") to an image.
 * Thanks Ninh Nguyen from the National Library of Australia for providing the source code.
 * This version of the code is basically Ninh's but reorganised a little. Used with permission.
 */
public class Brand
{
    private int brandWidth;   // footer width in pixels
    private int brandHeight;  // footer height in pixels
    private Font font;        // font used for the footer text
    private int xOffset;      // horizontal indent of text from the box edge

    /**
     * Constructor to set up footer image attributes.
     *
     * @param brandWidth length of the footer in pixels
     * @param brandHeight height of the footer in pixels
     * @param font font to use for text on the footer
     * @param xOffset number of pixels text should be indented from left-hand side of footer
     */
    public Brand(int brandWidth,
                 int brandHeight,
                 Font font,
                 int xOffset)
    {
        this.brandWidth = brandWidth;
        this.brandHeight = brandHeight;
        this.font = font;
        this.xOffset = xOffset;
    }

    /**
     * Create the brand image.
     *
     * @param brandLeftText text that should appear in the bottom left of the image
     * @param shortLeftText abbreviated form of brandLeftText that will be substituted if
     * the image is resized such that brandLeftText will not fit. <code>null</code> if not
     * required
     * @param brandRightText text that should appear in the bottom right of the image
     *
     * @return BufferedImage a BufferedImage object describing the brand image file
     */
    public BufferedImage create(String brandLeftText,
                                String shortLeftText,
                                String brandRightText)
    {
        BufferedImage brandImage =
                new BufferedImage(brandWidth, brandHeight, BufferedImage.TYPE_INT_RGB);

        // Choose which texts fit, based on the footer width: wide footers
        // get the full left text, medium ones the short form, and narrow
        // ones only the right-hand text.
        BrandText[] allBrandText;
        if (brandWidth >= 350)
        {
            allBrandText = new BrandText[]
            {
                new BrandText(BrandText.BL, brandLeftText),
                new BrandText(BrandText.BR, brandRightText)
            };
        }
        else if (brandWidth >= 190)
        {
            allBrandText = new BrandText[]
            {
                new BrandText(BrandText.BL, shortLeftText),
                new BrandText(BrandText.BR, brandRightText)
            };
        }
        else
        {
            allBrandText = new BrandText[]
            {
                new BrandText(BrandText.BR, brandRightText)
            };
        }

        // (the redundant null check on allBrandText was removed: every
        // branch above assigns a non-empty array)
        for (BrandText brandText : allBrandText)
        {
            drawImage(brandImage, brandText);
        }

        return brandImage;
    }

    /**
     * Do the text placement and preparatory work for the brand image generation.
     *
     * @param brandImage a BufferedImage object where the image is created
     * @param brandText a BrandText object describing what text is to be placed
     *        in what position within the brand
     */
    private void drawImage(BufferedImage brandImage,
                           BrandText brandText)
    {
        int imgWidth = brandImage.getWidth();
        int imgHeight = brandImage.getHeight();

        Graphics2D g2 = brandImage.createGraphics();
        try
        {
            g2.setFont(font);
            FontMetrics fm = g2.getFontMetrics();

            // Size of the box holding the text, padded by xOffset each side.
            int bWidth = fm.stringWidth(brandText.getText()) + xOffset * 2 + 1;
            int bHeight = fm.getHeight();

            // Anchor the box in the requested corner.
            int bx = 0;
            int by = 0;
            if (brandText.getLocation().equals(BrandText.TL))
            {
                bx = 0;
                by = 0;
            }
            else if (brandText.getLocation().equals(BrandText.TR))
            {
                bx = imgWidth - bWidth;
                by = 0;
            }
            else if (brandText.getLocation().equals(BrandText.BL))
            {
                bx = 0;
                by = imgHeight - bHeight;
            }
            else if (brandText.getLocation().equals(BrandText.BR))
            {
                bx = imgWidth - bWidth;
                by = imgHeight - bHeight;
            }

            // Black box with white text, text indented by xOffset and
            // baseline placed using the font's ascent.
            Rectangle box = new Rectangle(bx, by, bWidth, bHeight);
            int tx = bx + xOffset;
            int ty = by + fm.getAscent();

            g2.setColor(Color.black);
            g2.fill(box);
            g2.setColor(Color.white);
            g2.drawString(brandText.getText(), tx, ty);
        }
        finally
        {
            // BUG FIX: the graphics context was never disposed, leaking
            // the native resources it holds.
            g2.dispose();
        }
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.mediafilter;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.io.IOException;
import org.apache.log4j.Logger;
import org.textmining.extraction.TextExtractor;
import org.textmining.extraction.word.WordTextExtractorFactory;
/*
*
* to do: helpful error messages - can't find mediafilter.cfg - can't
* instantiate filter - bitstream format doesn't exist
*
*/
/**
 * MediaFilter that extracts plain text from Microsoft Word documents.
 */
public class WordFilter extends MediaFilter
{
    private static Logger log = Logger.getLogger(WordFilter.class);

    /** Derivative filename: the original name with ".txt" appended. */
    public String getFilteredName(String oldFilename)
    {
        return oldFilename + ".txt";
    }

    /**
     * @return String bundle name the derivative is stored in
     */
    public String getBundleName()
    {
        return "TEXT";
    }

    /**
     * @return String bitstream format of the derivative
     */
    public String getFormatString()
    {
        return "Text";
    }

    /**
     * @return String description of the derivative
     */
    public String getDescription()
    {
        return "Extracted text";
    }

    /**
     * Extract the text of a Word document.
     *
     * @param source
     *            source input stream
     *
     * @return InputStream of the extracted text, or null when the source
     *         is not a recognisable Word file
     */
    public InputStream getDestinationStream(InputStream source)
        throws Exception
    {
        try
        {
            // Let the factory pick the right extractor for the stream,
            // then pull out the document text.
            TextExtractor extractor =
                new WordTextExtractorFactory().textExtractor(source);
            String extractedText = extractor.getText();

            // In verbose mode, echo the extracted text to STDOUT.
            if (MediaFilterManager.isVerbose)
            {
                System.out.println(extractedText);
            }

            // Hand the text back as a byte stream.
            return new ByteArrayInputStream(extractedText.getBytes());
        }
        catch (IOException ioe)
        {
            // Unrecognised format: report it and signal "no output".
            System.out.println("Invalid Word Format");
            log.error("Error detected - Word File format not recognized: " + ioe.getMessage(), ioe);
            return null;
        }
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.mediafilter;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.InputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import org.apache.log4j.Logger;
import org.apache.pdfbox.pdmodel.PDDocument;
import org.apache.pdfbox.util.PDFTextStripper;
import org.dspace.core.ConfigurationManager;
/*
*
* to do: helpful error messages - can't find mediafilter.cfg - can't
* instantiate filter - bitstream format doesn't exist
*
*/
/**
 * MediaFilter that extracts plain text from PDF documents using PDFBox.
 */
public class PDFFilter extends MediaFilter
{
    private static Logger log = Logger.getLogger(PDFFilter.class);

    /** Derivative filename: the original name with ".txt" appended. */
    public String getFilteredName(String oldFilename)
    {
        return oldFilename + ".txt";
    }

    /**
     * @return String bundle name the derivative is stored in
     */
    public String getBundleName()
    {
        return "TEXT";
    }

    /**
     * @return String bitstream format of the derivative
     */
    public String getFormatString()
    {
        return "Text";
    }

    /**
     * @return String description of the derivative
     */
    public String getDescription()
    {
        return "Extracted text";
    }

    /**
     * Extract the text of a PDF document.
     *
     * @param source
     *            source input stream
     *
     * @return InputStream of the extracted text, or null when extraction
     *         failed with an OutOfMemoryError and skipping is configured
     */
    public InputStream getDestinationStream(InputStream source)
        throws Exception
    {
        try
        {
            // When pdffilter.largepdfs is set, spool the extracted text to
            // a temporary file instead of buffering it all in memory.
            boolean useTemporaryFile = ConfigurationManager.getBooleanProperty("pdffilter.largepdfs", false);

            // get input stream from bitstream
            // pass to filter, get string back
            PDFTextStripper pts = new PDFTextStripper();
            PDDocument pdfDoc = null;
            Writer writer = null;
            File tempTextFile = null;
            ByteArrayOutputStream byteStream = null;
            if (useTemporaryFile)
            {
                // NOTE: the temp file is only removed at JVM exit.
                tempTextFile = File.createTempFile("dspacepdfextract" + source.hashCode(), ".txt");
                tempTextFile.deleteOnExit();
                writer = new OutputStreamWriter(new FileOutputStream(tempTextFile));
            }
            else
            {
                byteStream = new ByteArrayOutputStream();
                writer = new OutputStreamWriter(byteStream);
            }
            try
            {
                pdfDoc = PDDocument.load(source);
                pts.writeText(pdfDoc, writer);
            }
            finally
            {
                // Always close document and writer, even on failure;
                // close errors are logged rather than propagated so the
                // primary exception (if any) is preserved.
                try
                {
                    if (pdfDoc != null)
                    {
                        pdfDoc.close();
                    }
                }
                catch(Exception e)
                {
                    log.error("Error closing PDF file: " + e.getMessage(), e);
                }
                try
                {
                    writer.close();
                }
                catch(Exception e)
                {
                    log.error("Error closing temporary extract file: " + e.getMessage(), e);
                }
            }
            if (useTemporaryFile)
            {
                return new FileInputStream(tempTextFile);
            }
            else
            {
                byte[] bytes = byteStream.toByteArray();
                return new ByteArrayInputStream(bytes);
            }
        }
        catch (OutOfMemoryError oome)
        {
            // Very large PDFs can exhaust the heap. When configured
            // (pdffilter.skiponmemoryexception) the error is swallowed so a
            // batch run can continue with the next bitstream; otherwise it
            // is rethrown.
            log.error("Error parsing PDF document " + oome.getMessage(), oome);
            if (!ConfigurationManager.getBooleanProperty("pdffilter.skiponmemoryexception", false))
            {
                throw oome;
            }
        }
        // Reached only on a swallowed OutOfMemoryError: signal "no output".
        return null;
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.mediafilter;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import javax.swing.text.Document;
import javax.swing.text.html.HTMLEditorKit;
/*
*
* to do: helpful error messages - can't find mediafilter.cfg - can't
* instantiate filter - bitstream format doesn't exist
*
*/
/**
 * MediaFilter that extracts plain text from HTML documents.
 */
public class HTMLFilter extends MediaFilter
{
    /** Derivative filename: the original name with ".txt" appended. */
    public String getFilteredName(String oldFilename)
    {
        return oldFilename + ".txt";
    }

    /**
     * @return String bundle name the derivative is stored in
     */
    public String getBundleName()
    {
        return "TEXT";
    }

    /**
     * @return String bitstream format of the derivative
     */
    public String getFormatString()
    {
        return "Text";
    }

    /**
     * @return String description of the derivative
     */
    public String getDescription()
    {
        return "Extracted text";
    }

    /**
     * Extract the plain text of an HTML document.
     *
     * @param source
     *            source input stream
     *
     * @return InputStream of the extracted plain text
     */
    public InputStream getDestinationStream(InputStream source)
        throws Exception
    {
        // Parse with Swing's HTML kit. Any charset directive inside the
        // document is ignored, trusting the encoding of the incoming
        // stream instead.
        HTMLEditorKit parserKit = new HTMLEditorKit();
        Document parsed = parserKit.createDefaultDocument();
        parsed.putProperty("IgnoreCharsetDirective", Boolean.TRUE);
        parserKit.read(source, parsed, 0);

        // Hand the extracted text back as a byte stream.
        String plainText = parsed.getText(0, parsed.getLength());
        return new ByteArrayInputStream(plainText.getBytes());
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.mediafilter;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import org.apache.poi.extractor.ExtractorFactory;
import org.apache.poi.xslf.extractor.XSLFPowerPointExtractor;
import org.apache.poi.hslf.extractor.PowerPointExtractor;
import org.apache.poi.POITextExtractor;
import org.apache.log4j.Logger;
/*
* TODO: Allow user to configure extraction of only text or only notes
*
*/
/**
 * MediaFilter that extracts text (slides and notes) from PowerPoint
 * documents, both legacy binary and OOXML formats.
 */
public class PowerPointFilter extends MediaFilter
{
    private static Logger log = Logger.getLogger(PowerPointFilter.class);

    /** Derivative filename: the original name with ".txt" appended. */
    public String getFilteredName(String oldFilename)
    {
        return oldFilename + ".txt";
    }

    /**
     * @return String bundle name the derivative is stored in
     */
    public String getBundleName()
    {
        return "TEXT";
    }

    /**
     * @return String bitstream format of the derivative
     *
     * TODO: Check that this is correct
     */
    public String getFormatString()
    {
        return "Text";
    }

    /**
     * @return String description of the derivative
     */
    public String getDescription()
    {
        return "Extracted text";
    }

    /**
     * Extract slide and notes text from a PowerPoint document.
     *
     * @param source
     *            source input stream
     *
     * @return InputStream of the extracted text, or null when nothing
     *         could be extracted
     */
    public InputStream getDestinationStream(InputStream source)
        throws Exception
    {
        try
        {
            // POI selects the appropriate extractor for the stream.
            // PowerPoint XML files and legacy format files require
            // different classes and APIs for text extraction.
            POITextExtractor extractor =
                new ExtractorFactory().createExtractor(source);

            String text = null;
            if (extractor instanceof XSLFPowerPointExtractor)
            {
                // PowerPoint XML: the two 'true' arguments request text
                // from both the slides and the notes.
                text = ((XSLFPowerPointExtractor) extractor).getText(true, true);
            }
            else if (extractor instanceof PowerPointExtractor)
            {
                // Legacy binary PowerPoint: slides and notes come from
                // separate calls.
                text = ((PowerPointExtractor) extractor).getText()
                    + " " + ((PowerPointExtractor) extractor).getNotes();
            }

            if (text != null)
            {
                // In verbose mode, echo the extracted text to STDOUT.
                if (MediaFilterManager.isVerbose)
                {
                    System.out.println(text);
                }

                // Hand the text back as a byte stream.
                return new ByteArrayInputStream(text.getBytes());
            }
        }
        catch(Exception e)
        {
            log.error("Error filtering bitstream: " + e.getMessage(), e);
        }
        // No extractor matched or extraction failed: signal "no output".
        return null;
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.mediafilter;
/**
 * Interface to allow filters to register the input formats they handle
 * (useful for exposing underlying capabilities of libraries used)
 */
public interface SelfRegisterInputFormats
{
    /**
     * @return MIME types this filter accepts as input; callers tolerate a
     *         null return (treated as "none declared")
     */
    public String[] getInputMIMETypes();

    /**
     * @return bitstream-format short descriptions this filter accepts as
     *         input; callers tolerate a null return
     */
    public String[] getInputDescriptions();

    /**
     * @return file extensions this filter accepts as input; callers
     *         tolerate a null return
     */
    public String[] getInputExtensions();
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.mets;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URLEncoder;
import java.sql.SQLException;
import java.util.Date;
import java.util.Properties;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.PosixParser;
import org.dspace.authorize.AuthorizeException;
import org.dspace.authorize.AuthorizeManager;
import org.dspace.content.Bitstream;
import org.dspace.content.BitstreamFormat;
import org.dspace.content.Bundle;
import org.dspace.content.Collection;
import org.dspace.content.DCValue;
import org.dspace.content.DSpaceObject;
import org.dspace.content.Item;
import org.dspace.content.ItemIterator;
import org.dspace.core.ConfigurationManager;
import org.dspace.core.Constants;
import org.dspace.core.Context;
import org.dspace.core.Utils;
import org.dspace.handle.HandleManager;
import org.dspace.app.util.Util;
import edu.harvard.hul.ois.mets.Agent;
import edu.harvard.hul.ois.mets.AmdSec;
import edu.harvard.hul.ois.mets.BinData;
import edu.harvard.hul.ois.mets.Checksumtype;
import edu.harvard.hul.ois.mets.Div;
import edu.harvard.hul.ois.mets.DmdSec;
import edu.harvard.hul.ois.mets.FLocat;
import edu.harvard.hul.ois.mets.FileGrp;
import edu.harvard.hul.ois.mets.FileSec;
import edu.harvard.hul.ois.mets.Loctype;
import edu.harvard.hul.ois.mets.MdWrap;
import edu.harvard.hul.ois.mets.Mdtype;
import edu.harvard.hul.ois.mets.Mets;
import edu.harvard.hul.ois.mets.MetsHdr;
import edu.harvard.hul.ois.mets.Name;
import edu.harvard.hul.ois.mets.RightsMD;
import edu.harvard.hul.ois.mets.Role;
import edu.harvard.hul.ois.mets.StructMap;
import edu.harvard.hul.ois.mets.Type;
import edu.harvard.hul.ois.mets.XmlData;
import edu.harvard.hul.ois.mets.helper.Base64;
import edu.harvard.hul.ois.mets.helper.MetsException;
import edu.harvard.hul.ois.mets.helper.MetsValidator;
import edu.harvard.hul.ois.mets.helper.MetsWriter;
import edu.harvard.hul.ois.mets.helper.PCData;
import edu.harvard.hul.ois.mets.helper.PreformedXML;
/**
* Tool for exporting DSpace AIPs with the metadata serialised in METS format
*
* @author Robert Tansley
* @version $Revision: 5844 $
* @deprecated Please use METS Packager to import/export METS files
* @see org.dspace.content.packager.DSpaceMETSDisseminator
* @see org.dspace.content.packager.DSpaceMETSIngester
* @see org.dspace.app.packager.Packager
*/
public class METSExport
{
private static int licenseFormat = -1;
private static Properties dcToMODS;
/**
 * Command-line entry point for exporting METS AIPs.
 * Supports -i (single item), -c (a collection's items), -a (everything),
 * -d (destination directory) and -h (help). Exits the JVM directly with
 * 0 on success or 1 on a bad argument.
 */
public static void main(String[] args) throws Exception
{
    Context context = new Context();

    init(context);

    // create an options object and populate it
    CommandLineParser parser = new PosixParser();

    Options options = new Options();

    options.addOption("c", "collection", true,
            "Handle of collection to export");
    options.addOption("i", "item", true, "Handle of item to export");
    options.addOption("a", "all", false, "Export all items in the archive");
    options.addOption("d", "destination", true, "Destination directory");
    options.addOption("h", "help", false, "Help");

    CommandLine line = parser.parse(options, args);

    if (line.hasOption('h'))
    {
        HelpFormatter myhelp = new HelpFormatter();
        myhelp.printHelp("metsexport", options);
        System.out
                .println("\nExport a collection: metsexport -c hdl:123.456/789");
        System.out
                .println("Export an item: metsexport -i hdl:123.456/890");
        System.out.println("Export everything: metsexport -a");

        System.exit(0);
    }

    // Destination directory; normalised to end with a file separator so
    // handles can be appended directly.
    String dest = "";

    if (line.hasOption('d'))
    {
        dest = line.getOptionValue('d');

        // Make sure it ends with a file separator
        if (!dest.endsWith(File.separator))
        {
            dest = dest + File.separator;
        }
    }

    if (line.hasOption('i'))
    {
        String handle = getHandleArg(line.getOptionValue('i'));

        // Exporting a single item; exits immediately on success or failure.
        DSpaceObject o = HandleManager.resolveToObject(context, handle);

        if ((o != null) && o instanceof Item)
        {
            writeAIP(context, (Item) o, dest);
            System.exit(0);
        }
        else
        {
            System.err.println(line.getOptionValue('i')
                    + " is not a valid item Handle");
            System.exit(1);
        }
    }

    ItemIterator items = null;
    try
    {
        if (line.hasOption('c'))
        {
            String handle = getHandleArg(line.getOptionValue('c'));

            // Exporting a collection's worth of items
            DSpaceObject o = HandleManager.resolveToObject(context, handle);

            if ((o != null) && o instanceof Collection)
            {
                items = ((Collection) o).getItems();
            }
            else
            {
                System.err.println(line.getOptionValue('c')
                        + " is not a valid collection Handle");
                System.exit(1);
            }
        }

        if (line.hasOption('a'))
        {
            items = Item.findAll(context);
        }

        if (items == null)
        {
            System.err.println("Nothing to export specified!");
            System.exit(1);
        }

        while (items.hasNext())
        {
            writeAIP(context, items.next(), dest);
        }
    }
    finally
    {
        // Always release the iterator's resources.
        if (items != null)
        {
            items.close();
        }
    }

    // Export is read-only: abort rather than commit the context.
    context.abort();
    System.exit(0);
}
/**
* Initialise various variables, read in config etc.
*
* @param context
* DSpace context
*/
private static void init(Context context) throws SQLException, IOException
{
// Don't init again if initialised already
if (licenseFormat != -1)
{
return;
}
// Find the License format
BitstreamFormat bf = BitstreamFormat.findByShortDescription(context,
"License");
licenseFormat = bf.getID();
// get path to DC->MODS map info file
String configFile = ConfigurationManager.getProperty("dspace.dir")
+ File.separator + "config" + File.separator + "dc2mods.cfg";
// Read it in
InputStream is = null;
try
{
is = new FileInputStream(configFile);
dcToMODS = new Properties();
dcToMODS.load(is);
}
finally
{
if (is != null)
{
try
{
is.close();
}
catch (IOException ioe)
{
}
}
}
}
/**
* Write out the AIP for the given item to the given directory. A new
* directory will be created with the Handle (URL-encoded) as the directory
* name, and inside, a mets.xml file written, together with the bitstreams.
*
* @param context
* DSpace context to use
* @param item
* Item to write
* @param dest
* destination directory
*/
public static void writeAIP(Context context, Item item, String dest)
throws SQLException, IOException, AuthorizeException, MetsException
{
System.out.println("Exporting item hdl:" + item.getHandle());
// Create aip directory
java.io.File aipDir = new java.io.File(dest
+ URLEncoder.encode("hdl:" + item.getHandle(), "UTF-8"));
if (!aipDir.mkdir())
{
// Couldn't make the directory for some reason
throw new IOException("Couldn't create " + aipDir.toString());
}
// Write the METS file
FileOutputStream out = null;
try
{
out = new FileOutputStream(aipDir.toString() + java.io.File.separator + "mets.xml");
writeMETS(context, item, out, false);
}
finally
{
if (out != null)
{
out.close();
}
}
// Write bitstreams
Bundle[] bundles = item.getBundles();
for (int i = 0; i < bundles.length; i++)
{
Bitstream[] bitstreams = bundles[i].getBitstreams();
for (int b = 0; b < bitstreams.length; b++)
{
// Skip license bitstream and unauthorized resources
if ((bitstreams[b].getFormat().getID() != licenseFormat)
&& AuthorizeManager.authorizeActionBoolean(context,
bitstreams[b], Constants.READ))
{
out = new FileOutputStream(aipDir.toString()
+ java.io.File.separator
+ bitstreams[b].getName());
InputStream in = bitstreams[b].retrieve();
Utils.bufferedCopy(in, out);
out.close();
in.close();
}
}
}
}
/**
* Write METS metadata corresponding to the metadata for an item
*
* @param context
* DSpace context
* @param item
* DSpace item to create METS object for
* @param os
* A stream to write METS package to (UTF-8 encoding will be used)
* @param fullURL
* if <code>true</code>, the <FLocat> values for each
* bitstream will be the full URL for that bitstream. Otherwise,
* only the filename itself will be used.
*/
public static void writeMETS(Context context, Item item, OutputStream os, boolean fullURL)
throws SQLException, IOException, AuthorizeException
{
try
{
init(context);
// Create the METS file
Mets mets = new Mets();
// Top-level stuff
mets.setOBJID("hdl:" + item.getHandle());
mets.setLABEL("DSpace Item");
mets.setSchema("mods", "http://www.loc.gov/mods/v3",
"http://www.loc.gov/standards/mods/v3/mods-3-0.xsd");
// MetsHdr
MetsHdr metsHdr = new MetsHdr();
metsHdr.setCREATEDATE(new Date()); // FIXME: CREATEDATE is now:
// maybe should be item create
// date?
// Agent
Agent agent = new Agent();
agent.setROLE(Role.CUSTODIAN);
agent.setTYPE(Type.ORGANIZATION);
Name name = new Name();
name.getContent()
.add(
new PCData(ConfigurationManager
.getProperty("dspace.name")));
agent.getContent().add(name);
metsHdr.getContent().add(agent);
mets.getContent().add(metsHdr);
DmdSec dmdSec = new DmdSec();
dmdSec.setID("DMD_hdl_" + item.getHandle());
MdWrap mdWrap = new MdWrap();
mdWrap.setMDTYPE(Mdtype.MODS);
XmlData xmlData = new XmlData();
createMODS(item, xmlData);
mdWrap.getContent().add(xmlData);
dmdSec.getContent().add(mdWrap);
mets.getContent().add(dmdSec);
// amdSec
AmdSec amdSec = new AmdSec();
amdSec.setID("TMD_hdl_" + item.getHandle());
// FIXME: techMD here
// License as <rightsMD><mdWrap><binData>base64encoded</binData>...
InputStream licenseStream = findLicense(context, item);
if (licenseStream != null)
{
RightsMD rightsMD = new RightsMD();
MdWrap rightsMDWrap = new MdWrap();
rightsMDWrap.setMIMETYPE("text/plain");
rightsMDWrap.setMDTYPE(Mdtype.OTHER);
rightsMDWrap.setOTHERMDTYPE("TEXT");
BinData binData = new BinData();
Base64 base64 = new Base64(licenseStream);
binData.getContent().add(base64);
rightsMDWrap.getContent().add(binData);
rightsMD.getContent().add(rightsMDWrap);
amdSec.getContent().add(rightsMD);
}
// FIXME: History data???? Nooooo!!!!
mets.getContent().add(amdSec);
// fileSec
FileSec fileSec = new FileSec();
boolean fileSecEmpty = true;
Bundle[] bundles = item.getBundles();
for (int i = 0; i < bundles.length; i++)
{
Bitstream[] bitstreams = bundles[i].getBitstreams();
// Unusual condition, but if no bitstreams, skip this bundle
if (bitstreams.length == 0)
{
continue;
}
// First: we skip the license bundle, since it's included
// elsewhere
if (bitstreams[0].getFormat().getID() == licenseFormat)
{
continue;
}
// Create a fileGrp
FileGrp fileGrp = new FileGrp();
// Bundle name for USE attribute
if ((bundles[i].getName() != null)
&& !bundles[i].getName().equals(""))
{
fileGrp.setUSE(bundles[i].getName());
}
for (int bits = 0; bits < bitstreams.length; bits++)
{
// What's the persistent(-ish) ID?
String bitstreamPID = ConfigurationManager
.getProperty("dspace.url")
+ "/bitstream/"
+ item.getHandle()
+ "/"
+ bitstreams[bits].getSequenceID()
+ "/"
+ Util.encodeBitstreamName(bitstreams[bits].getName(),
"UTF-8");
edu.harvard.hul.ois.mets.File file = new edu.harvard.hul.ois.mets.File();
/*
* ID: we use the unique part of the persistent ID, i.e. the
* Handle + sequence number, but with _'s instead of /'s so
* it's a legal xsd:ID.
*/
String xmlIDstart = item.getHandle().replaceAll("/", "_")
+ "_";
file.setID(xmlIDstart + bitstreams[bits].getSequenceID());
String groupID = "GROUP_" + xmlIDstart
+ bitstreams[bits].getSequenceID();
/*
* If we're in THUMBNAIL or TEXT bundles, the bitstream is
* extracted text or a thumbnail, so we use the name to work
* out which bitstream to be in the same group as
*/
if ((bundles[i].getName() != null)
&& (bundles[i].getName().equals("THUMBNAIL") || bundles[i]
.getName().equals("TEXT")))
{
// Try and find the original bitstream, and chuck the
// derived
// bitstream in the same group
Bitstream original = findOriginalBitstream(item,
bitstreams[bits]);
if (original != null)
{
groupID = "GROUP_" + xmlIDstart
+ original.getSequenceID();
}
}
file.setGROUPID(groupID);
file.setOWNERID(bitstreamPID);
// FIXME: ADMID should point to appropriate TechMD section
// above
file
.setMIMETYPE(bitstreams[bits].getFormat()
.getMIMEType());
// FIXME: CREATED: no date
file.setSIZE(bitstreams[bits].getSize());
file.setCHECKSUM(bitstreams[bits].getChecksum());
file.setCHECKSUMTYPE(Checksumtype.MD5);
// FLocat: filename is as in records, or full URL
// FIXME: Duplicate filenames and characters illegal to
// local OS may cause problems
FLocat flocat = new FLocat();
flocat.setLOCTYPE(Loctype.URL);
if (fullURL)
{
flocat.setXlinkHref(bitstreamPID);
}
else
{
flocat.setXlinkHref(bitstreams[bits].getName());
}
// Add FLocat to File, and File to FileGrp
file.getContent().add(flocat);
fileGrp.getContent().add(file);
}
// Add fileGrp to fileSec
fileSec.getContent().add(fileGrp);
fileSecEmpty = false;
}
// Add fileSec to document
if (!fileSecEmpty)
{
mets.getContent().add(fileSec);
}
// FIXME: Add Structmap here, but it is empty and we won't use it now.
StructMap structMap = new StructMap();
Div div = new Div();
structMap.getContent().add(div);
mets.getContent().add(structMap);
mets.validate(new MetsValidator());
mets.write(new MetsWriter(os));
}
catch (MetsException e)
{
// We don't pass up a MetsException, so callers don't need to
// know the details of the METS toolkit
e.printStackTrace();
throw new IOException(e.getMessage(), e);
}
}
/**
* Utility to find the license bitstream from an item
*
* @param context
* DSpace context
* @param item
* the item
* @return the license as a string
*
* @throws IOException
* if the license bitstream can't be read
*/
private static InputStream findLicense(Context context, Item item)
throws SQLException, IOException, AuthorizeException
{
Bundle[] bundles = item.getBundles();
for (int i = 0; i < bundles.length; i++)
{
// Assume license will be in its own bundle
Bitstream[] bitstreams = bundles[i].getBitstreams();
if (bitstreams.length > 0 && bitstreams[0].getFormat().getID() == licenseFormat)
{
// Read the license into a string
return bitstreams[0].retrieve();
}
}
// Oops! No license!
return null;
}
/**
* For a bitstream that's a thumbnail or extracted text, find the
* corresponding bitstream in the ORIGINAL bundle
*
* @param item
* the item we're dealing with
* @param derived
* the derived bitstream
*
* @return the corresponding original bitstream (or null)
*/
private static Bitstream findOriginalBitstream(Item item, Bitstream derived)
throws SQLException
{
Bundle[] bundles = item.getBundles();
// Filename of original will be filename of the derived bitstream
// minus the extension (last 4 chars - .jpg or .txt)
String originalFilename = derived.getName().substring(0,
derived.getName().length() - 4);
// First find "original" bundle
for (int i = 0; i < bundles.length; i++)
{
if ((bundles[i].getName() != null)
&& bundles[i].getName().equals("ORIGINAL"))
{
// Now find the corresponding bitstream
Bitstream[] bitstreams = bundles[i].getBitstreams();
for (int bsnum = 0; bsnum < bitstreams.length; bsnum++)
{
if (bitstreams[bsnum].getName().equals(originalFilename))
{
return bitstreams[bsnum];
}
}
}
}
// Didn't find it
return null;
}
/**
* Create MODS metadata from the DC in the item, and add to the given
* XmlData METS object.
*
* @param item
* the item
* @param xmlData
* xmlData to add MODS to.
*/
private static void createMODS(Item item, XmlData xmlData)
{
DCValue[] dc = item.getDC(Item.ANY, Item.ANY, Item.ANY);
StringBuffer modsXML = new StringBuffer();
for (int i = 0; i < dc.length; i++)
{
// Get the property name - element[.qualifier]
String propName = ((dc[i].qualifier == null) ? dc[i].element
: (dc[i].element + "." + dc[i].qualifier));
String modsMapping = dcToMODS.getProperty(propName);
if (modsMapping == null)
{
System.err.println("WARNING: No MODS mapping for " + propName);
}
else
{
String value = dc[i].value;
// Replace all $'s with \$ so it doesn't trip up the replaceAll!
if (value != null && value.length() > 0)
{
// RegExp note: Yes, there really does need to be this many backslashes!
// To have \$ inserted in the replacement, both the backslash and the dollar
// have to be escaped (backslash) - so the replacemenet string has to be
// passed as \\\$. All of those backslashes then have to escaped in the literal
// for them to be in string used!!!
value = dc[i].value.replaceAll("\\$", "\\\\\\$");
}
if (!(("description.provenance".equals(propName)) &&
((ConfigurationManager.getBooleanProperty("oai.mets.hide-provenance", false)))))
{
// Replace '%s' with DC value (with entities encoded)
modsXML.append(modsMapping.replaceAll("%s", Utils
.addEntities(value)));
modsXML.append("\n"); // For readability
}
}
}
PreformedXML pXML = new PreformedXML(modsXML.toString());
xmlData.getContent().add(pXML);
}
/**
* Get the handle from the command line in the form 123.456/789. Doesn't
* matter if incoming handle has 'hdl:' or 'http://hdl....' before it.
*
* @param original
* Handle as passed in by user
* @return Handle as can be looked up in our table
*/
private static String getHandleArg(String original)
{
if (original.startsWith("hdl:"))
{
return original.substring(4);
}
if (original.startsWith("http://hdl.handle.net/"))
{
return original.substring(22);
}
return original;
}
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.sfx;
import java.io.File;
import java.io.IOException;
import java.net.URLEncoder;
import org.apache.commons.lang.StringUtils;
import org.apache.log4j.Logger;
import org.w3c.dom.Document;
import org.dspace.content.DCPersonName;
import org.dspace.content.DCValue;
import org.dspace.content.Item;
import org.dspace.core.Constants;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.ParserConfigurationException;
import org.w3c.dom.NamedNodeMap;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.xml.sax.SAXException;
public class SFXFileReader {
/** The SFX configuration file */
private static Document doc;
/** log4j logger */
private static final Logger log = Logger.getLogger(SFXFileReader.class);
/**
* Loads the SFX configuraiton file
*
* @param fileName The name of the SFX configuration file
* @param item The item to process
*
* @return the SFX string
* @throws IOException
*/
public static String loadSFXFile(String fileName, Item item) throws IOException
{
// Parse XML file -> XML document will be build
if (doc == null)
{
doc = parseFile(fileName);
}
// Return final sfx Query String
return doNodes(doc, item);
}
/** Parses XML file and returns XML document.
* @param fileName XML file to parse
* @return XML document or <B>null</B> if error occured
*/
public static Document parseFile(String fileName) {
log.info("Parsing XML file... " + fileName);
DocumentBuilder docBuilder;
Document doc = null;
DocumentBuilderFactory docBuilderFactory = DocumentBuilderFactory.newInstance();
docBuilderFactory.setIgnoringElementContentWhitespace(true);
try {
docBuilder = docBuilderFactory.newDocumentBuilder();
}
catch (ParserConfigurationException e) {
log.error("Wrong parser configuration: " + e.getMessage());
return null;
}
File sourceFile = new File(fileName);
try {
doc = docBuilder.parse(sourceFile);
}
catch (SAXException e) {
log.error("Wrong XML file structure: " + e.getMessage());
return null;
}
catch (IOException e) {
log.error("Could not read source file: " + e.getMessage());
}
log.info("XML file parsed");
return doc;
}
/**
* Process the item
*
* @param node
* @param item
* @return
* @throws IOException
*/
public static String doNodes(Node node, Item item) throws IOException
{
if (node == null)
{
log.error (" Empty Node ");
return null;
}
Node e = getElement(node);
NodeList nl = e.getChildNodes();
int len = nl.getLength();
String sfxfield = "";
int i = 0;
while ((i < len) && StringUtils.isEmpty(sfxfield))
{
Node nd = nl.item(i);
if ((nd == null) || isEmptyTextNode(nd))
{
i++;
continue;
}
String tagName = nd.getNodeName();
if (tagName.equals("query-pairs"))
{
sfxfield = processFields(nd, item);
}
i++;
}
log.info("Process fields : " + sfxfield);
return sfxfield;
}
/**
* Process the fields
*
* @param e
* @param item
* @return
* @throws IOException
*/
private static String processFields(Node e, Item item) throws IOException
{
NodeList cl = e.getChildNodes();
int lench = cl.getLength();
String myquery = "";
for (int j = 0; j < lench; j++)
{
Node nch = cl.item(j);
String querystring = "";
String schema = "";
String qualifier = "";
String element = "";
if (nch.getNodeName().equals("field"))
{
NodeList pl = nch.getChildNodes();
int plen = pl.getLength();
int finish = 0;
for (int k = 0; k < plen; k++)
{
Node vn= pl.item(k);
String vName = vn.getNodeName();
if (vName.equals("querystring"))
{
querystring = getValue(vn);
finish ++;
}
else if (vName.equals("dc-schema"))
{
schema = getValue(vn);
finish ++;
}
else if (vName.equals("dc-element"))
{
element = getValue(vn);
finish ++;
}
else if (vName.equals("dc-qualifier"))
{
qualifier = getValue(vn);
finish ++;
if (StringUtils.isEmpty(qualifier))
{
qualifier = null;
}
}
if (finish == 4)
{
DCValue[] dcvalue = item.getMetadata(schema, element, qualifier, Item.ANY);
if (dcvalue.length > 0)
{
// Issued Date
if (element.equals("date") && qualifier.equals("issued"))
{
String fullDate = dcvalue[0].value;
// Remove the time if there is one - day is greatest granularity for SFX
if (fullDate.length() > 10)
{
fullDate = fullDate.substring(0, 10);
}
if (myquery.equals(""))
{ myquery = querystring + URLEncoder.encode(fullDate, Constants.DEFAULT_ENCODING); }
else
{ myquery = myquery + "&" + querystring + URLEncoder.encode(fullDate, Constants.DEFAULT_ENCODING); }
}
else
{
// Contributor Author
if (element.equals("contributor") && qualifier.equals("author"))
{
DCPersonName dpn = new DCPersonName(dcvalue[0].value);
String dpnName = dcvalue[0].value;
if (querystring.endsWith("aulast=")) { dpnName = dpn.getLastName(); }
else { if (querystring.endsWith("aufirst=")) { dpnName = dpn.getFirstNames(); }}
if (myquery.equals(""))
{ myquery = querystring + URLEncoder.encode(dpnName, Constants.DEFAULT_ENCODING); }
else
{ myquery = myquery + "&" + querystring + URLEncoder.encode(dpnName, Constants.DEFAULT_ENCODING); }
}
else
{
if (myquery.equals(""))
{ myquery = querystring + URLEncoder.encode(dcvalue[0].value, Constants.DEFAULT_ENCODING);}
else
{ myquery = myquery + "&" + querystring + URLEncoder.encode(dcvalue[0].value, Constants.DEFAULT_ENCODING);}
}
}
} // if dc.length > 0
finish = 0;
querystring = "";
schema = "";
element = "";
qualifier = "";
} // if finish == 4
} //for k
} // if field
} // for j
return myquery;
}
/** Returns element node
* @param node element (it is XML tag)
* @return Element node otherwise null
*/
public static Node getElement(Node node)
{
NodeList child = node.getChildNodes();
int length = child.getLength();
for (int i = 0; i < length; i++)
{
Node kid = child.item(i);
if (kid.getNodeType() == Node.ELEMENT_NODE)
{
return kid;
}
}
return null;
}
/** Is Empty text Node **/
public static boolean isEmptyTextNode(Node nd)
{
boolean isEmpty = false;
if (nd.getNodeType() == Node.TEXT_NODE)
{
String text = nd.getNodeValue().trim();
if (text.length() == 0)
{
isEmpty = true;
}
}
return isEmpty;
}
/**
* Returns the value of the node's attribute named <name>
**/
public static String getAttribute(Node e, String name)
{
NamedNodeMap attrs = e.getAttributes();
int len = attrs.getLength();
if (len > 0)
{
for (int i = 0; i < len; i++)
{
Node attr = attrs.item(i);
if (name.equals(attr.getNodeName()))
{
return attr.getNodeValue().trim();
}
}
}
//no such attribute
return null;
}
/**
* Returns the value found in the Text node (if any) in the
* node list that's passed in.
*/
public static String getValue(Node node)
{
NodeList child = node.getChildNodes();
for (int i = 0; i < child.getLength(); i++)
{
Node kid = child.item(i);
short type = kid.getNodeType();
if (type == Node.TEXT_NODE)
{
return kid.getNodeValue().trim();
}
}
// Didn't find a text node
return null;
}
} | Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.itemimport;
import java.io.*;
import java.sql.SQLException;
import java.util.*;
import java.util.zip.ZipFile;
import java.util.zip.ZipEntry;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.transform.TransformerException;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.PosixParser;
import org.apache.commons.lang.StringUtils;
import org.apache.log4j.Logger;
import org.apache.xpath.XPathAPI;
import org.dspace.authorize.AuthorizeException;
import org.dspace.authorize.AuthorizeManager;
import org.dspace.authorize.ResourcePolicy;
import org.dspace.content.Bitstream;
import org.dspace.content.BitstreamFormat;
import org.dspace.content.Bundle;
import org.dspace.content.Collection;
import org.dspace.content.FormatIdentifier;
import org.dspace.content.InstallItem;
import org.dspace.content.Item;
import org.dspace.content.MetadataField;
import org.dspace.content.MetadataSchema;
import org.dspace.content.WorkspaceItem;
import org.dspace.core.ConfigurationManager;
import org.dspace.core.Constants;
import org.dspace.core.Context;
import org.dspace.eperson.EPerson;
import org.dspace.eperson.Group;
import org.dspace.handle.HandleManager;
import org.dspace.search.DSIndexer;
import org.dspace.workflow.WorkflowManager;
import org.w3c.dom.Document;
import org.w3c.dom.NamedNodeMap;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.xml.sax.SAXException;
/**
* Import items into DSpace. The conventional use is upload files by copying
* them. DSpace writes the item's bitstreams into its assetstore. Metadata is
* also loaded to the DSpace database.
* <P>
* A second use assumes the bitstream files already exist in a storage
* resource accessible to DSpace. In this case the bitstreams are 'registered'.
* That is, the metadata is loaded to the DSpace database and DSpace is given
* the location of the file which is subsumed into DSpace.
* <P>
* The distinction is controlled by the format of lines in the 'contents' file.
* See comments in processContentsFile() below.
* <P>
* Modified by David Little, UCSD Libraries 12/21/04 to
* allow the registration of files (bitstreams) into DSpace.
*/
public class ItemImport
{
    private static final Logger log = Logger.getLogger(ItemImport.class);

    // Run-mode flags, set once from the command line in main() and then read
    // by the add/replace/delete routines.
    // send each new item through the owning collection's workflow
    static boolean useWorkflow = false;
    // if using workflow, also send the usual notification emails
    static boolean useWorkflowSendEmail = false;
    // dry run: report what would happen but don't touch the archive
    static boolean isTest = false;
    // continue a previously aborted 'add' using the existing mapfile
    static boolean isResume = false;
    // suppress per-item metadata output
    static boolean isQuiet = false;
    // apply the collection's item template to new items
    static boolean template = false;

    // Writer for the mapfile (item directory -> handle); opened by the
    // add/replace routines and closed in main()
    static PrintWriter mapOut = null;

    // File listing filter to look for metadata files
    static FilenameFilter metadataFileFilter = new FilenameFilter()
    {
        public boolean accept(File dir, String n)
        {
            return n.startsWith("metadata_");
        }
    };

    // File listing filter to check for folders
    static FilenameFilter directoryFilter = new FilenameFilter()
    {
        public boolean accept(File dir, String n)
        {
            File item = new File(dir.getAbsolutePath() + File.separatorChar + n);
            return item.isDirectory();
        }
    };
    /**
     * Command-line entry point.  Parses the options, validates the chosen
     * command (add / replace / delete), optionally unzips a supplied archive
     * into the configured work directory, resolves the eperson and target
     * collections, and dispatches to addItems/replaceItems/deleteItems inside
     * a single DSpace transaction.  Always terminates via System.exit with the
     * accumulated status code.
     */
    public static void main(String[] argv) throws Exception
    {
        // Defer search-index updates until the whole batch is done
        // (restored in the finally block below)
        DSIndexer.setBatchProcessingMode(true);
        Date startTime = new Date();
        int status = 0;
        try
        {
            // create an options object and populate it
            CommandLineParser parser = new PosixParser();
            Options options = new Options();
            options.addOption("a", "add", false, "add items to DSpace");
            options.addOption("r", "replace", false, "replace items in mapfile");
            options.addOption("d", "delete", false,
                    "delete items listed in mapfile");
            options.addOption("s", "source", true, "source of items (directory)");
            options.addOption("z", "zip", true, "name of zip file");
            options.addOption("c", "collection", true,
                    "destination collection(s) Handle or database ID");
            options.addOption("m", "mapfile", true, "mapfile items in mapfile");
            options.addOption("e", "eperson", true,
                    "email of eperson doing importing");
            options.addOption("w", "workflow", false,
                    "send submission through collection's workflow");
            options.addOption("n", "notify", false,
                    "if sending submissions through the workflow, send notification emails");
            options.addOption("t", "test", false,
                    "test run - do not actually import items");
            options.addOption("p", "template", false, "apply template");
            options.addOption("R", "resume", false,
                    "resume a failed import (add only)");
            options.addOption("q", "quiet", false, "don't display metadata");
            options.addOption("h", "help", false, "help");
            CommandLine line = parser.parse(options, argv);
            String command = null; // add replace remove, etc
            String sourcedir = null;
            String mapfile = null;
            String eperson = null; // db ID or email
            String[] collections = null; // db ID or handles
            if (line.hasOption('h'))
            {
                HelpFormatter myhelp = new HelpFormatter();
                myhelp.printHelp("ItemImport\n", options);
                System.out
                        .println("\nadding items:    ItemImport -a -e eperson -c collection -s sourcedir -m mapfile");
                System.out
                        .println("\nadding items from zip file:    ItemImport -a -e eperson -c collection -s sourcedir -z filename.zip -m mapfile");
                System.out
                        .println("replacing items: ItemImport -r -e eperson -c collection -s sourcedir -m mapfile");
                System.out
                        .println("deleting items:  ItemImport -d -e eperson -m mapfile");
                System.out
                        .println("If multiple collections are specified, the first collection will be the one that owns the item.");
                System.exit(0);
            }
            // Last of -a/-r/-d wins; they are not checked for mutual exclusion
            if (line.hasOption('a'))
            {
                command = "add";
            }
            if (line.hasOption('r'))
            {
                command = "replace";
            }
            if (line.hasOption('d'))
            {
                command = "delete";
            }
            if (line.hasOption('w'))
            {
                useWorkflow = true;
                if (line.hasOption('n'))
                {
                    useWorkflowSendEmail = true;
                }
            }
            if (line.hasOption('t'))
            {
                isTest = true;
                System.out.println("**Test Run** - not actually importing items.");
            }
            if (line.hasOption('p'))
            {
                template = true;
            }
            if (line.hasOption('s')) // source
            {
                sourcedir = line.getOptionValue('s');
            }
            if (line.hasOption('m')) // mapfile
            {
                mapfile = line.getOptionValue('m');
            }
            if (line.hasOption('e')) // eperson
            {
                eperson = line.getOptionValue('e');
            }
            if (line.hasOption('c')) // collections
            {
                collections = line.getOptionValues('c');
            }
            if (line.hasOption('R'))
            {
                isResume = true;
                System.out
                        .println("**Resume import** - attempting to import items not already imported");
            }
            if (line.hasOption('q'))
            {
                isQuiet = true;
            }
            boolean zip = false;
            String zipfilename = "";
            String ziptempdir = ConfigurationManager.getProperty("org.dspace.app.itemexport.work.dir");
            if (line.hasOption('z'))
            {
                zip = true;
                zipfilename = sourcedir + System.getProperty("file.separator") + line.getOptionValue('z');
            }
            // now validate
            // must have a command set
            if (command == null)
            {
                System.out
                        .println("Error - must run with either add, replace, or remove (run with -h flag for details)");
                System.exit(1);
            }
            else if ("add".equals(command) || "replace".equals(command))
            {
                if (sourcedir == null)
                {
                    System.out
                            .println("Error - a source directory containing items must be set");
                    System.out.println(" (run with -h flag for details)");
                    System.exit(1);
                }
                if (mapfile == null)
                {
                    System.out
                            .println("Error - a map file to hold importing results must be specified");
                    System.out.println(" (run with -h flag for details)");
                    System.exit(1);
                }
                if (eperson == null)
                {
                    System.out
                            .println("Error - an eperson to do the importing must be specified");
                    System.out.println(" (run with -h flag for details)");
                    System.exit(1);
                }
                if (collections == null)
                {
                    System.out
                            .println("Error - at least one destination collection must be specified");
                    System.out.println(" (run with -h flag for details)");
                    System.exit(1);
                }
            }
            else if ("delete".equals(command))
            {
                if (eperson == null)
                {
                    System.out
                            .println("Error - an eperson to do the importing must be specified");
                    System.exit(1);
                }
                if (mapfile == null)
                {
                    System.out.println("Error - a map file must be specified");
                    System.exit(1);
                }
            }
            // can only resume for adds
            if (isResume && !"add".equals(command))
            {
                System.out
                        .println("Error - resume option only works with --add command");
                System.exit(1);
            }
            // do checks around mapfile - if mapfile exists and 'add' is selected,
            // resume must be chosen
            File myFile = new File(mapfile);
            if (!isResume && "add".equals(command) && myFile.exists())
            {
                System.out.println("Error - the mapfile " + mapfile
                        + " already exists.");
                System.out
                        .println("Either delete it or use --resume if attempting to resume an aborted import.");
                System.exit(1);
            }
            // does the zip file exist and can we write to the temp directory
            if (zip)
            {
                // NOTE(review): this checks sourcedir for readability although
                // the message refers to the zip file (zipfilename) - confirm
                // whether zipfilename should be checked instead
                File zipfile = new File(sourcedir);
                if (!zipfile.canRead())
                {
                    System.out.println("Zip file '" + sourcedir + "' does not exist, or is not readable.");
                    System.exit(1);
                }
                if (ziptempdir == null)
                {
                    System.out.println("Unable to unzip import file as the key 'org.dspace.app.itemexport.work.dir' is not set in dspace.cfg");
                    System.exit(1);
                }
                zipfile = new File(ziptempdir);
                if (!zipfile.isDirectory())
                {
                    System.out.println("'" + ConfigurationManager.getProperty("org.dspace.app.itemexport.work.dir") +
                                       "' as defined by the key 'org.dspace.app.itemexport.work.dir' in dspace.cfg " +
                                       "is not a valid directory");
                    System.exit(1);
                }
                File tempdir = new File(ziptempdir);
                // mkdirs() also returns false when the directory already exists,
                // so this error is best-effort only
                if (!tempdir.exists() && !tempdir.mkdirs())
                {
                    log.error("Unable to create temporary directory");
                }
                // From here on the "source" is the unzipped copy under the work dir
                sourcedir = ziptempdir + System.getProperty("file.separator") + line.getOptionValue("z");
                ziptempdir = ziptempdir + System.getProperty("file.separator") +
                             line.getOptionValue("z") + System.getProperty("file.separator");
            }
            ItemImport myloader = new ItemImport();
            // create a context
            Context c = new Context();
            // find the EPerson, assign to context
            EPerson myEPerson = null;
            if (eperson.indexOf('@') != -1)
            {
                // @ sign, must be an email
                myEPerson = EPerson.findByEmail(c, eperson);
            }
            else
            {
                myEPerson = EPerson.find(c, Integer.parseInt(eperson));
            }
            if (myEPerson == null)
            {
                System.out.println("Error, eperson cannot be found: " + eperson);
                System.exit(1);
            }
            c.setCurrentUser(myEPerson);
            // find collections
            Collection[] mycollections = null;
            // don't need to validate collections set if command is "delete"
            if (!"delete".equals(command))
            {
                System.out.println("Destination collections:");
                mycollections = new Collection[collections.length];
                // validate each collection arg to see if it's a real collection
                for (int i = 0; i < collections.length; i++)
                {
                    // is the ID a handle?
                    if (collections[i].indexOf('/') != -1)
                    {
                        // string has a / so it must be a handle - try and resolve
                        // it
                        mycollections[i] = (Collection) HandleManager
                                .resolveToObject(c, collections[i]);
                        // resolved, now make sure it's a collection
                        if ((mycollections[i] == null)
                                || (mycollections[i].getType() != Constants.COLLECTION))
                        {
                            mycollections[i] = null;
                        }
                    }
                    // not a handle, try and treat it as an integer collection
                    // database ID
                    else if (collections[i] != null)
                    {
                        mycollections[i] = Collection.find(c, Integer
                                .parseInt(collections[i]));
                    }
                    // was the collection valid?
                    if (mycollections[i] == null)
                    {
                        throw new IllegalArgumentException("Cannot resolve "
                                + collections[i] + " to collection");
                    }
                    // print progress info
                    String owningPrefix = "";
                    // the first collection on the command line owns the item
                    if (i == 0)
                    {
                        owningPrefix = "Owning ";
                    }
                    System.out.println(owningPrefix + " Collection: "
                            + mycollections[i].getMetadata("name"));
                }
            } // end of validating collections
            try
            {
                // If this is a zip archive, unzip it first
                // NOTE(review): zf is never closed, and entry.getName() is used
                // unvalidated to build output paths - an entry containing ".."
                // could escape ziptempdir (zip-slip); confirm inputs are trusted
                if (zip)
                {
                    ZipFile zf = new ZipFile(zipfilename);
                    ZipEntry entry;
                    Enumeration entries = zf.entries();
                    while (entries.hasMoreElements())
                    {
                        entry = (ZipEntry)entries.nextElement();
                        if (entry.isDirectory())
                        {
                            if (!new File(ziptempdir + entry.getName()).mkdir())
                            {
                                log.error("Unable to create contents directory");
                            }
                        }
                        else
                        {
                            System.out.println("Extracting file: " + entry.getName());
                            // Create any intermediate directories named in the entry
                            int index = entry.getName().lastIndexOf('/');
                            if (index == -1)
                            {
                                // Was it created on Windows instead?
                                index = entry.getName().lastIndexOf('\\');
                            }
                            if (index > 0)
                            {
                                File dir = new File(ziptempdir + entry.getName().substring(0, index));
                                if (!dir.mkdirs())
                                {
                                    log.error("Unable to create directory");
                                }
                            }
                            byte[] buffer = new byte[1024];
                            int len;
                            InputStream in = zf.getInputStream(entry);
                            BufferedOutputStream out = new BufferedOutputStream(
                                new FileOutputStream(ziptempdir + entry.getName()));
                            while((len = in.read(buffer)) >= 0)
                            {
                                out.write(buffer, 0, len);
                            }
                            in.close();
                            out.close();
                        }
                    }
                }
                c.setIgnoreAuthorization(true);
                if ("add".equals(command))
                {
                    myloader.addItems(c, mycollections, sourcedir, mapfile, template);
                }
                else if ("replace".equals(command))
                {
                    myloader.replaceItems(c, mycollections, sourcedir, mapfile, template);
                }
                else if ("delete".equals(command))
                {
                    myloader.deleteItems(c, mapfile);
                }
                // complete all transactions
                c.complete();
            }
            catch (Exception e)
            {
                // abort all operations
                if (mapOut != null)
                {
                    mapOut.close();
                }
                mapOut = null;
                c.abort();
                e.printStackTrace();
                System.out.println(e);
                status = 1;
            }
            // Delete the unzipped file
            try
            {
                if (zip)
                {
                    // NOTE(review): System.gc() here presumably works around
                    // file handles pinned by the unclosed ZipFile (notably on
                    // Windows) - closing zf would make this unnecessary
                    System.gc();
                    System.out.println("Deleting temporary zip directory: " + ziptempdir);
                    ItemImport.deleteDirectory(new File(ziptempdir));
                }
            }
            catch (Exception ex)
            {
                System.out.println("Unable to delete temporary zip archive location: " + ziptempdir);
            }
            if (mapOut != null)
            {
                mapOut.close();
            }
            if (isTest)
            {
                System.out.println("***End of Test Run***");
            }
        }
        finally
        {
            // Re-enable normal (per-change) indexing and report timing
            DSIndexer.setBatchProcessingMode(false);
            Date endTime = new Date();
            System.out.println("Started: " + startTime.getTime());
            System.out.println("Ended: " + endTime.getTime());
            System.out.println("Elapsed time: " + ((endTime.getTime() - startTime.getTime()) / 1000) + " secs (" + (endTime.getTime() - startTime.getTime()) + " msecs)");
        }
        System.exit(status);
    }
/**
 * Import all item directories found under sourceDir into the given
 * collections, writing a (directory, handle) line per item to mapFile.
 *
 * @param c DSpace context
 * @param mycollections destination collections; [0] becomes the owner
 * @param sourceDir directory containing one subdirectory per item
 * @param mapFile mapfile to generate (appended to when resuming)
 * @param template whether to apply the owning collection's item template
 */
private void addItems(Context c, Collection[] mycollections,
        String sourceDir, String mapFile, boolean template) throws Exception
{
    // item directories to skip when resuming an interrupted import
    Map<String, String> skipItems = new HashMap<String, String>();

    System.out.println("Adding items from directory: " + sourceDir);
    System.out.println("Generating mapfile: " + mapFile);

    // create the mapfile
    if (!isTest)
    {
        // get the directory names of items to skip (will be in keys of
        // hash)
        if (isResume)
        {
            skipItems = readMapFile(mapFile);
        }

        // sneaky isResume == true means open file in append mode
        File outFile = new File(mapFile);
        mapOut = new PrintWriter(new FileWriter(outFile, isResume));
        // note: new PrintWriter() never returns null, so the old
        // "mapOut == null" check was dead code; failures surface as
        // IOException from the FileWriter constructor
    }

    // open and process the source directory; new File() never returns
    // null (the old "d == null" check could never fire), so verify the
    // path really is a readable directory instead
    File d = new java.io.File(sourceDir);
    if (!d.isDirectory())
    {
        System.out.println("Error, cannot open source directory " + sourceDir);
        System.exit(1);
    }

    String[] dircontents = d.list(directoryFilter);
    if (dircontents == null)
    {
        // list() returns null on an I/O error; previously this fell
        // through to a NullPointerException in Arrays.sort
        System.out.println("Error, cannot read source directory " + sourceDir);
        System.exit(1);
    }
    Arrays.sort(dircontents);

    for (int i = 0; i < dircontents.length; i++)
    {
        if (skipItems.containsKey(dircontents[i]))
        {
            System.out.println("Skipping import of " + dircontents[i]);
        }
        else
        {
            addItem(c, mycollections, sourceDir, dircontents[i], mapOut, template);
            System.out.println(i + " " + dircontents[i]);
            // release memory held by the context between items
            c.clearCache();
        }
    }
}
/**
 * Replace existing items: for each (directory, handle) pair in the
 * mapfile, delete the existing item and re-import the directory under
 * the old handle.
 *
 * @param c DSpace context
 * @param mycollections destination collections; [0] becomes the owner
 * @param sourceDir directory containing the replacement item directories
 * @param mapFile mapfile produced by a previous "add" run
 * @param template whether to apply the owning collection's item template
 */
private void replaceItems(Context c, Collection[] mycollections,
        String sourceDir, String mapFile, boolean template) throws Exception
{
    // verify the source directory; new File() never returns null (the
    // old "d == null" check could never fire), so test that the path
    // actually is a directory
    File d = new java.io.File(sourceDir);
    if (!d.isDirectory())
    {
        System.out.println("Error, cannot open source directory "
                + sourceDir);
        System.exit(1);
    }

    // read in HashMap first, to get list of handles & source dirs
    Map<String, String> myHash = readMapFile(mapFile);

    // for each handle, re-import the item, discard the new handle
    // and re-assign the old handle
    for (Map.Entry<String, String> mapEntry : myHash.entrySet())
    {
        // mapfile lines are "directory handle-or-id"
        String newItemName = mapEntry.getKey();
        String oldHandle = mapEntry.getValue();

        Item oldItem = null;

        if (oldHandle.indexOf('/') != -1)
        {
            System.out.println("\tReplacing: " + oldHandle);

            // add new item, locate old one
            oldItem = (Item) HandleManager.resolveToObject(c, oldHandle);
        }
        else
        {
            oldItem = Item.find(c, Integer.parseInt(oldHandle));
        }

        /* Rather than exposing public item methods to change handles --
         * two handles can't exist at the same time due to key constraints
         * so would require temp handle being stored, old being copied to new and
         * new being copied to old, all a bit messy -- a handle file is written to
         * the import directory containing the old handle, the existing item is
         * deleted and then the import runs as though it were loading an item which
         * had already been assigned a handle (so a new handle is not even assigned).
         * As a commit does not occur until after a successful add, it is safe to
         * do a delete as any error results in an aborted transaction without harming
         * the original item */
        File handleFile = new File(sourceDir + File.separatorChar + newItemName + File.separatorChar + "handle");
        PrintWriter handleOut = new PrintWriter(new FileWriter(handleFile, true));
        // note: new PrintWriter() never returns null, so the old
        // "handleOut == null" check was dead code; failures surface as
        // IOException from the FileWriter constructor

        handleOut.println(oldHandle);
        handleOut.close();

        deleteItem(c, oldItem);
        addItem(c, mycollections, sourceDir, newItemName, null, template);
        c.clearCache();
    }
}
/**
 * Delete every item listed in the mapfile. Mapfile values are either
 * handles ("123/456") or database IDs.
 *
 * @param c DSpace context
 * @param mapFile mapfile produced by a previous "add" run
 */
private void deleteItems(Context c, String mapFile) throws Exception
{
    System.out.println("Deleting items listed in mapfile: " + mapFile);

    // read in the mapfile
    Map<String, String> myhash = readMapFile(mapFile);

    // now delete everything that appeared in the mapFile.
    // (the old code iterated the raw keySet and called get() per key;
    // iterating the values directly is equivalent and type-safe)
    for (String itemID : myhash.values())
    {
        if (itemID.indexOf('/') != -1)
        {
            // it's a handle
            String myhandle = itemID;
            System.out.println("Deleting item " + myhandle);
            deleteItem(c, myhandle);
        }
        else
        {
            // it's an ID
            Item myitem = Item.find(c, Integer.parseInt(itemID));
            System.out.println("Deleting item " + itemID);
            deleteItem(c, myitem);
        }
        c.clearCache();
    }
}
/**
 * Import a single item directory: create the item (from the collection
 * template if requested), load its metadata and bitstreams, and either
 * start a workflow or install it directly.
 *
 * @param c DSpace context
 * @param mycollections destination collections; [0] becomes the owner
 * @param path parent directory containing the item directories
 * @param itemname name of this item's directory under path
 * @param mapOut mapfile writer, or null to skip writing a map entry
 * @param template whether to apply the owning collection's item template
 * @return the newly created Item (null in test mode)
 */
private Item addItem(Context c, Collection[] mycollections, String path,
    String itemname, PrintWriter mapOut, boolean template) throws Exception
{
    String mapOutput = null;

    System.out.println("Adding item from directory " + itemname);

    // create workspace item; skipped entirely in test mode, so myitem/wi
    // stay null and every mutating call below is guarded by !isTest
    Item myitem = null;
    WorkspaceItem wi = null;

    if (!isTest)
    {
        wi = WorkspaceItem.create(c, mycollections[0], template);
        myitem = wi.getItem();
    }

    // now fill out dublin core for item
    loadMetadata(c, myitem, path + File.separatorChar + itemname
            + File.separatorChar);

    // and the bitstreams from the contents file
    // process contents file, add bitstreams and bundles, return any
    // non-standard permissions for later application
    List<String> options = processContentsFile(c, myitem, path
            + File.separatorChar + itemname, "contents");

    if (useWorkflow)
    {
        // don't process handle file
        // start up a workflow
        if (!isTest)
        {
            // Should we send a workflow alert email or not?
            if (useWorkflowSendEmail)
            {
                WorkflowManager.start(c, wi);
            }
            else
            {
                WorkflowManager.startWithoutNotify(c, wi);
            }

            // send ID to the mapfile (workflow items have no handle yet)
            mapOutput = itemname + " " + myitem.getID();
        }
    }
    else
    {
        // only process handle file if not using workflow system;
        // myhandle is null if no handle file exists (new handle assigned)
        String myhandle = processHandleFile(c, myitem, path
                + File.separatorChar + itemname, "handle");

        // put item in system
        if (!isTest)
        {
            InstallItem.installItem(c, wi, myhandle);

            // find the handle, and output to map file
            myhandle = HandleManager.findHandle(c, myitem);

            mapOutput = itemname + " " + myhandle;
        }

        // set permissions if specified in contents file
        if (options.size() > 0)
        {
            System.out.println("Processing options");
            processOptions(c, myitem, options);
        }
    }

    // now add to multiple collections if requested; [0] already owns the
    // item via WorkspaceItem.create, so start at index 1
    if (mycollections.length > 1)
    {
        for (int i = 1; i < mycollections.length; i++)
        {
            if (!isTest)
            {
                mycollections[i].addItem(myitem);
            }
        }
    }

    // made it this far, everything is fine, commit transaction
    if (mapOut != null)
    {
        mapOut.println(mapOutput);
    }

    c.commit();

    return myitem;
}
// Remove an item, given the Item object itself. An item is deleted by
// removing it from every collection that holds it; removal from the last
// collection is what triggers deletion of the item itself. No-op in test mode.
private void deleteItem(Context c, Item myitem) throws Exception
{
    if (isTest)
    {
        return;
    }

    Collection[] owners = myitem.getCollections();
    for (Collection owner : owners)
    {
        owner.removeItem(myitem);
    }
}
// Remove an item, given only its handle: resolve the handle to an Item and
// delegate to deleteItem(Context, Item). Prints a warning and does nothing
// if the handle no longer resolves (item already deleted?).
private void deleteItem(Context c, String myhandle) throws Exception
{
    Item target = (Item) HandleManager.resolveToObject(c, myhandle);

    if (target == null)
    {
        System.out.println("Error - cannot locate item - already deleted?");
        return;
    }

    deleteItem(c, target);
}
////////////////////////////////////
// utility methods
////////////////////////////////////
// read in the map file and generate a hashmap of (file,handle) pairs
// Parse a mapfile into a map of (archive directory name -> handle/ID).
// Each line must be: <directory name><whitespace><handle or ID>.
// Throws if any line has fewer than two tokens.
private Map<String, String> readMapFile(String filename) throws Exception
{
    Map<String, String> entries = new HashMap<String, String>();

    BufferedReader reader = null;
    try
    {
        reader = new BufferedReader(new FileReader(filename));

        for (String line = reader.readLine(); line != null; line = reader.readLine())
        {
            StringTokenizer tokens = new StringTokenizer(line);

            if (!tokens.hasMoreTokens())
            {
                throw new Exception("Bad mapfile line:\n" + line);
            }
            String myFile = tokens.nextToken();

            if (!tokens.hasMoreTokens())
            {
                throw new Exception("Bad mapfile line:\n" + line);
            }
            String myHandle = tokens.nextToken();

            entries.put(myFile, myHandle);
        }
    }
    finally
    {
        if (reader != null)
        {
            reader.close();
        }
    }

    return entries;
}
// Load all metadata schemas into the item: the mandatory dublin_core.xml
// plus any additional metadata_*.xml files in the item directory.
private void loadMetadata(Context c, Item myitem, String path)
    throws SQLException, IOException, ParserConfigurationException,
    SAXException, TransformerException, AuthorizeException
{
    // Load the dublin core metadata
    loadDublinCore(c, myitem, path + "dublin_core.xml");

    // Load any additional metadata schemas
    File folder = new File(path);
    File[] file = folder.listFiles(metadataFileFilter);
    // listFiles() returns null (not an empty array) when the path is not
    // a readable directory; previously that caused a NullPointerException
    if (file != null)
    {
        for (int i = 0; i < file.length; i++)
        {
            loadDublinCore(c, myitem, file[i].getAbsolutePath());
        }
    }
}
/**
 * Load one metadata XML file (dublin_core.xml or metadata_*.xml) and add
 * every dcvalue element it contains to the item's metadata.
 *
 * @param c DSpace context
 * @param myitem item to receive the metadata (null in test mode)
 * @param filename path of the XML file to parse
 */
private void loadDublinCore(Context c, Item myitem, String filename)
    throws SQLException, IOException, ParserConfigurationException,
    SAXException, TransformerException, AuthorizeException
{
    Document document = loadXML(filename);

    // Get the schema, for backward compatibility we will default to the
    // dublin core schema if the schema name is not available in the import
    // file
    String schema;
    NodeList metadata = XPathAPI.selectNodeList(document, "/dublin_core");
    Node schemaAttr = metadata.item(0).getAttributes().getNamedItem(
            "schema");
    if (schemaAttr == null)
    {
        schema = MetadataSchema.DC_SCHEMA;
    }
    else
    {
        schema = schemaAttr.getNodeValue();
    }

    // Get the nodes corresponding to each metadata value
    // (old comment said "formats" -- a copy/paste leftover)
    NodeList dcNodes = XPathAPI.selectNodeList(document,
            "/dublin_core/dcvalue");

    if (!isQuiet)
    {
        System.out.println("\tLoading dublin core from " + filename);
    }

    // Add each dcvalue node to the item's metadata
    for (int i = 0; i < dcNodes.getLength(); i++)
    {
        Node n = dcNodes.item(i);
        addDCValue(c, myitem, schema, n);
    }
}
/**
 * Add one dcvalue element to the item's metadata, normalising the
 * qualifier ("none"/"" become unqualified) and the language (falls back
 * to the configured default, then to "en"). In test mode, only verifies
 * that the schema and field exist in the registry.
 */
private void addDCValue(Context c, Item i, String schema, Node n) throws TransformerException, SQLException, AuthorizeException
{
    // text content of the dcvalue node; an empty element reads back as null
    String value = getStringValue(n);
    if (value == null)
    {
        value = "";
    }

    String element = getAttributeValue(n, "element");
    String qualifier = getAttributeValue(n, "qualifier");
    String language = getAttributeValue(n, "language");
    if (language != null)
    {
        language = language.trim();
    }

    if (!isQuiet)
    {
        System.out.println("\tSchema: " + schema + " Element: " + element + " Qualifier: " + qualifier
                + " Value: " + value);
    }

    // "none" or empty string means unqualified
    if ("none".equals(qualifier) || "".equals(qualifier))
    {
        qualifier = null;
    }

    // if language isn't set, use the system's default value,
    // and failing that, a goofy default of English
    if (StringUtils.isEmpty(language))
    {
        language = ConfigurationManager.getProperty("default.language");
    }
    if (language == null)
    {
        language = "en";
    }

    if (isTest)
    {
        // dry run: just check that the metadata field actually exists
        MetadataSchema foundSchema = MetadataSchema.find(c, schema);

        if (foundSchema == null)
        {
            System.out.println("ERROR: schema '"+schema+"' was not found in the registry.");
            return;
        }

        MetadataField foundField = MetadataField.findByElement(c, foundSchema.getSchemaID(), element, qualifier);

        if (foundField == null)
        {
            System.out.println("ERROR: Metadata field: '"+schema+"."+element+"."+qualifier+"' was not found in the registry.");
            return;
        }
    }
    else
    {
        i.addMetadata(schema, element, qualifier, language, value);
    }
}
/**
 * Read the first line of the item's handle file, or return null if the
 * file is empty, unreadable, or does not exist (a new handle will then be
 * generated at install time).
 */
private String processHandleFile(Context c, Item i, String path, String filename)
{
    File file = new File(path + File.separatorChar + filename);
    String result = null;

    System.out.println("Processing handle file: " + filename);

    if (!file.exists())
    {
        // probably no handle file, just return null
        System.out.println("It appears there is no handle file -- generating one");
        return null;
    }

    BufferedReader is = null;
    try
    {
        is = new BufferedReader(new FileReader(file));

        // result gets contents of file, or null
        result = is.readLine();

        System.out.println("read handle: '" + result + "'");
    }
    catch (IOException e)
    {
        // covers FileNotFoundException too (it is a subclass);
        // probably no handle file, just return null
        System.out.println("It appears there is no handle file -- generating one");
    }
    finally
    {
        if (is != null)
        {
            try
            {
                is.close();
            }
            catch (IOException e1)
            {
                System.err.println("Non-critical problem releasing resources.");
            }
        }
    }

    return result;
}
/**
 * Given a contents file and an item, stuff the item with bitstreams from
 * the contents file. Returns a List of Strings with lines from the
 * contents file that request non-default bitstream permissions or
 * descriptions (applied later by processOptions).
 *
 * @param c DSpace context
 * @param i item receiving the bitstreams (null in test mode)
 * @param path item directory
 * @param filename name of the contents file within the item directory
 * @return option lines (bitstream name + permission/description clauses)
 */
private List<String> processContentsFile(Context c, Item i, String path,
        String filename) throws SQLException, IOException,
        AuthorizeException
{
    File contentsFile = new File(path + File.separatorChar + filename);
    String line = "";
    List<String> options = new ArrayList<String>();

    System.out.println("\tProcessing contents file: " + contentsFile);

    if (contentsFile.exists())
    {
        BufferedReader is = null;
        try
        {
            is = new BufferedReader(new FileReader(contentsFile));
            while ((line = is.readLine()) != null)
            {
                if ("".equals(line.trim()))
                {
                    continue;
                }

                // Each line is either:
                // 1) registered into dspace (leading -r)
                // 2) imported conventionally into dspace (no -r)
                if (line.trim().startsWith("-r "))
                {
                    // line should be one of these two:
                    // -r -s n -f filepath
                    // -r -s n -f filepath\tbundle:bundlename
                    // where
                    // n is the assetstore number
                    // filepath is the path of the file to be registered
                    // bundlename is an optional bundle name
                    String sRegistrationLine = line.trim();
                    int iAssetstore = -1;
                    String sFilePath = null;
                    String sBundle = null;
                    StringTokenizer tokenizer = new StringTokenizer(sRegistrationLine);
                    while (tokenizer.hasMoreTokens())
                    {
                        String sToken = tokenizer.nextToken();
                        if ("-r".equals(sToken))
                        {
                            continue;
                        }
                        else if ("-s".equals(sToken) && tokenizer.hasMoreTokens())
                        {
                            try
                            {
                                iAssetstore =
                                    Integer.parseInt(tokenizer.nextToken());
                            }
                            catch (NumberFormatException e)
                            {
                                // ignore - iAssetstore remains -1
                            }
                        }
                        else if ("-f".equals(sToken) && tokenizer.hasMoreTokens())
                        {
                            sFilePath = tokenizer.nextToken();
                        }
                        else if (sToken.startsWith("bundle:"))
                        {
                            sBundle = sToken.substring(7);
                        }
                        else
                        {
                            // unrecognized token - should be no problem
                        }
                    } // while
                    if (iAssetstore == -1 || sFilePath == null)
                    {
                        System.out.println("\tERROR: invalid contents file line");
                        System.out.println("\t\tSkipping line: "
                                + sRegistrationLine);
                        continue;
                    }
                    registerBitstream(c, i, iAssetstore, sFilePath, sBundle);
                    // NOTE(review): the "Description:" field below echoes
                    // sBundle -- looks like a copy/paste slip in this log line
                    System.out.println("\tRegistering Bitstream: " + sFilePath
                            + "\tAssetstore: " + iAssetstore
                            + "\tBundle: " + sBundle
                            + "\tDescription: " + sBundle);
                    continue; // process next line in contents file
                }

                int bitstreamEndIndex = line.indexOf('\t');

                if (bitstreamEndIndex == -1)
                {
                    // no extra info
                    processContentFileEntry(c, i, path, line, null, false);
                    System.out.println("\tBitstream: " + line);
                }
                else
                {
                    String bitstreamName = line.substring(0, bitstreamEndIndex);

                    boolean bundleExists = false;
                    boolean permissionsExist = false;
                    boolean descriptionExists = false;

                    // look for a bundle name; its value runs to the next
                    // tab or to end-of-line
                    String bundleMarker = "\tbundle:";
                    int bMarkerIndex = line.indexOf(bundleMarker);
                    int bEndIndex = 0;
                    if (bMarkerIndex > 0)
                    {
                        bEndIndex = line.indexOf("\t", bMarkerIndex + 1);
                        if (bEndIndex == -1)
                        {
                            bEndIndex = line.length();
                        }
                        bundleExists = true;
                    }

                    // look for permissions
                    String permissionsMarker = "\tpermissions:";
                    int pMarkerIndex = line.indexOf(permissionsMarker);
                    int pEndIndex = 0;
                    if (pMarkerIndex > 0)
                    {
                        pEndIndex = line.indexOf("\t", pMarkerIndex + 1);
                        if (pEndIndex == -1)
                        {
                            pEndIndex = line.length();
                        }
                        permissionsExist = true;
                    }

                    // look for descriptions
                    String descriptionMarker = "\tdescription:";
                    int dMarkerIndex = line.indexOf(descriptionMarker);
                    int dEndIndex = 0;
                    if (dMarkerIndex > 0)
                    {
                        dEndIndex = line.indexOf("\t", dMarkerIndex + 1);
                        if (dEndIndex == -1)
                        {
                            dEndIndex = line.length();
                        }
                        descriptionExists = true;
                    }

                    // is this the primary bitstream?
                    String primaryBitstreamMarker = "\tprimary:true";
                    boolean primary = false;
                    String primaryStr = "";
                    if (line.contains(primaryBitstreamMarker))
                    {
                        primary = true;
                        primaryStr = "\t **Setting as primary bitstream**";
                    }

                    if (bundleExists)
                    {
                        String bundleName = line.substring(bMarkerIndex
                                + bundleMarker.length(), bEndIndex).trim();

                        processContentFileEntry(c, i, path, bitstreamName, bundleName, primary);
                        System.out.println("\tBitstream: " + bitstreamName +
                                "\tBundle: " + bundleName +
                                primaryStr);
                    }
                    else
                    {
                        processContentFileEntry(c, i, path, bitstreamName, null, primary);
                        System.out.println("\tBitstream: " + bitstreamName + primaryStr);
                    }

                    if (permissionsExist || descriptionExists)
                    {
                        String extraInfo = bitstreamName;

                        if (permissionsExist)
                        {
                            extraInfo = extraInfo
                                    + line.substring(pMarkerIndex, pEndIndex);
                        }

                        if (descriptionExists)
                        {
                            extraInfo = extraInfo
                                    + line.substring(dMarkerIndex, dEndIndex);
                        }

                        options.add(extraInfo);
                    }
                }
            }
        }
        finally
        {
            if (is != null)
            {
                is.close();
            }
        }
    }
    else
    {
        // no contents file: only allow a metadata-only item directory.
        // BUG FIX: the original tested the method parameter "filename"
        // (always "contents") instead of the loop variable "fileName",
        // so any handle or metadata_*.xml file wrongly triggered the
        // "No contents file found" exception
        String[] dirListing = new File(path).list();
        for (String fileName : dirListing)
        {
            if (!"dublin_core.xml".equals(fileName) && !fileName.equals("handle") && !fileName.startsWith("metadata_"))
            {
                throw new FileNotFoundException("No contents file found");
            }
        }

        System.out.println("No contents file found - but only metadata files found. Assuming metadata only.");
    }

    return options;
}
/**
 * Add one bitstream, named on a line of the contents file, to the item.
 *
 * @param c DSpace context
 * @param i item receiving the bitstream
 * @param path item directory containing the file
 * @param fileName name of the file (becomes the bitstream name)
 * @param bundleName target bundle, or null to choose LICENSE/ORIGINAL
 * @param primary whether to mark this bitstream as the bundle's primary one
 * @throws SQLException
 * @throws IOException
 * @throws AuthorizeException
 */
private void processContentFileEntry(Context c, Item i, String path,
        String fileName, String bundleName, boolean primary) throws SQLException,
        IOException, AuthorizeException
{
    String fullpath = path + File.separatorChar + fileName;

    // get an input stream
    BufferedInputStream bis = new BufferedInputStream(new FileInputStream(
            fullpath));
    try
    {
        String newBundleName = bundleName;

        if (bundleName == null)
        {
            // no bundle requested: license.txt goes to LICENSE,
            // everything else to ORIGINAL
            if ("license.txt".equals(fileName))
            {
                newBundleName = "LICENSE";
            }
            else
            {
                // call it ORIGINAL
                newBundleName = "ORIGINAL";
            }
        }

        if (!isTest)
        {
            // find the bundle
            Bundle[] bundles = i.getBundles(newBundleName);
            Bundle targetBundle = null;

            if (bundles.length < 1)
            {
                // not found, create a new one
                targetBundle = i.createBundle(newBundleName);
            }
            else
            {
                // put bitstreams into first bundle
                targetBundle = bundles[0];
            }

            // now add the bitstream
            Bitstream bs = targetBundle.createBitstream(bis);

            bs.setName(fileName);

            // Identify the format
            // FIXME - guessing format guesses license.txt incorrectly as a text
            // file format!
            BitstreamFormat bf = FormatIdentifier.guessFormat(c, bs);
            bs.setFormat(bf);

            // Is this the primary bitstream?
            if (primary)
            {
                targetBundle.setPrimaryBitstreamID(bs.getID());
                targetBundle.update();
            }

            bs.update();
        }
    }
    finally
    {
        // previously the stream was only closed on success, leaking the
        // file handle whenever bitstream creation or update threw
        bis.close();
    }
}
/**
 * Register the bitstream file into DSpace.
 *
 * Registration records a reference to a file already stored in an
 * assetstore rather than copying its bits into DSpace.
 *
 * @param c DSpace context
 * @param i item receiving the bitstream
 * @param assetstore the assetstore number
 * @param bitstreamPath the full filepath expressed in the contents file
 * @param bundleName target bundle, or null to choose LICENSE/ORIGINAL
 * @throws SQLException
 * @throws IOException
 * @throws AuthorizeException
 */
private void registerBitstream(Context c, Item i, int assetstore,
        String bitstreamPath, String bundleName )
    throws SQLException, IOException, AuthorizeException
{
    // TODO validate assetstore number
    // TODO make sure the bitstream is there

    Bitstream bs = null;
    String newBundleName = bundleName;

    if (bundleName == null)
    {
        // no bundle requested: license files go to LICENSE,
        // everything else to ORIGINAL
        if (bitstreamPath.endsWith("license.txt"))
        {
            newBundleName = "LICENSE";
        }
        else
        {
            // call it ORIGINAL
            newBundleName = "ORIGINAL";
        }
    }

    if(!isTest)
    {
        // find the bundle
        Bundle[] bundles = i.getBundles(newBundleName);
        Bundle targetBundle = null;

        if( bundles.length < 1 )
        {
            // not found, create a new one
            targetBundle = i.createBundle(newBundleName);
        }
        else
        {
            // put bitstreams into first bundle
            targetBundle = bundles[0];
        }

        // now add the bitstream
        bs = targetBundle.registerBitstream(assetstore, bitstreamPath);

        // set the name to just the filename
        int iLastSlash = bitstreamPath.lastIndexOf('/');
        bs.setName(bitstreamPath.substring(iLastSlash + 1));

        // Identify the format
        // FIXME - guessing format guesses license.txt incorrectly as a text file format!
        BitstreamFormat bf = FormatIdentifier.guessFormat(c, bs);
        bs.setFormat(bf);

        bs.update();
    }
}
/**
 *
 * Process the Options to apply to the Item. The options are tab delimited
 *
 * Options:
 * 48217870-MIT.pdf permissions: -r 'MIT Users' description: Full printable version (MIT only)
 * permissions:[r|w]-['group name']
 * description: 'the description of the file'
 *
 * where:
 * [r|w] (meaning: read|write)
 * ['MIT Users'] (the group name)
 *
 * Each option line begins with the bitstream name the option applies to,
 * as assembled by processContentsFile().
 *
 * @param c
 * @param myItem
 * @param options
 * @throws SQLException
 * @throws AuthorizeException
 */
private void processOptions(Context c, Item myItem, List<String> options)
    throws SQLException, AuthorizeException
{
    for (String line : options)
    {
        System.out.println("\tprocessing " + line);

        boolean permissionsExist = false;
        boolean descriptionExists = false;

        // locate the "\tpermissions:" clause; its value runs to the next
        // tab or to end-of-line
        String permissionsMarker = "\tpermissions:";
        int pMarkerIndex = line.indexOf(permissionsMarker);
        int pEndIndex = 0;
        if (pMarkerIndex > 0)
        {
            pEndIndex = line.indexOf("\t", pMarkerIndex + 1);
            if (pEndIndex == -1)
            {
                pEndIndex = line.length();
            }
            permissionsExist = true;
        }

        // same for the "\tdescription:" clause
        String descriptionMarker = "\tdescription:";
        int dMarkerIndex = line.indexOf(descriptionMarker);
        int dEndIndex = 0;
        if (dMarkerIndex > 0)
        {
            dEndIndex = line.indexOf("\t", dMarkerIndex + 1);
            if (dEndIndex == -1)
            {
                dEndIndex = line.length();
            }
            descriptionExists = true;
        }

        // bitstream name is everything before the first tab.
        // NOTE(review): assumes every option line contains a tab; a line
        // without one would throw StringIndexOutOfBoundsException. Lines
        // are only built by processContentsFile, which always appends at
        // least one tab-prefixed clause -- confirm if reused elsewhere.
        int bsEndIndex = line.indexOf("\t");
        String bitstreamName = line.substring(0, bsEndIndex);

        int actionID = -1;
        String groupName = "";
        Group myGroup = null;
        if (permissionsExist)
        {
            String thisPermission = line.substring(pMarkerIndex
                    + permissionsMarker.length(), pEndIndex);

            // get permission type ("read" or "write")
            int pTypeIndex = thisPermission.indexOf('-');

            // get permission group (should be in single quotes)
            int groupIndex = thisPermission.indexOf('\'', pTypeIndex);
            int groupEndIndex = thisPermission.indexOf('\'', groupIndex + 1);

            // if not in single quotes, assume everything after type flag is
            // group name
            if (groupIndex == -1)
            {
                groupIndex = thisPermission.indexOf(' ', pTypeIndex);
                groupEndIndex = thisPermission.length();
            }

            groupName = thisPermission.substring(groupIndex + 1,
                    groupEndIndex);

            if (thisPermission.toLowerCase().charAt(pTypeIndex + 1) == 'r')
            {
                actionID = Constants.READ;
            }
            else if (thisPermission.toLowerCase().charAt(pTypeIndex + 1) == 'w')
            {
                actionID = Constants.WRITE;
            }

            try
            {
                myGroup = Group.findByName(c, groupName);
            }
            catch (SQLException sqle)
            {
                System.out.println("SQL Exception finding group name: "
                        + groupName);
                // do nothing, will check for null group later
            }
        }

        String thisDescription = "";
        if (descriptionExists)
        {
            thisDescription = line.substring(
                    dMarkerIndex + descriptionMarker.length(), dEndIndex)
                    .trim();
        }

        Bitstream bs = null;
        boolean notfound = true;
        if (!isTest)
        {
            // find the named bitstream among the item's non-internal ones
            Bitstream[] bitstreams = myItem.getNonInternalBitstreams();
            for (int j = 0; j < bitstreams.length && notfound; j++)
            {
                if (bitstreams[j].getName().equals(bitstreamName))
                {
                    bs = bitstreams[j];
                    notfound = false;
                }
            }
        }

        if (notfound && !isTest)
        {
            // this should never happen
            System.out.println("\tdefault permissions set for "
                    + bitstreamName);
        }
        else if (!isTest)
        {
            if (permissionsExist)
            {
                if (myGroup == null)
                {
                    System.out.println("\t" + groupName
                            + " not found, permissions set to default");
                }
                else if (actionID == -1)
                {
                    System.out
                            .println("\tinvalid permissions flag, permissions set to default");
                }
                else
                {
                    System.out.println("\tSetting special permissions for "
                            + bitstreamName);
                    setPermission(c, myGroup, actionID, bs);
                }
            }

            if (descriptionExists)
            {
                System.out.println("\tSetting description for "
                        + bitstreamName);
                bs.setDescription(thisDescription);
                bs.update();
            }
        }
    }
}
/**
 * Set the Permission on a Bitstream: replaces the default policy with a
 * single policy granting the given action to the given group. In test
 * mode, only reports what would be done.
 *
 * @param c DSpace context
 * @param g group to grant the action to
 * @param actionID Constants.READ or Constants.WRITE
 * @param bs the bitstream to set the policy on
 * @throws SQLException
 * @throws AuthorizeException
 */
private void setPermission(Context c, Group g, int actionID, Bitstream bs)
    throws SQLException, AuthorizeException
{
    if (isTest)
    {
        // dry run: describe the policy that would be created
        if (actionID == Constants.READ)
        {
            System.out.println("\t\tpermissions: READ for " + g.getName());
        }
        else if (actionID == Constants.WRITE)
        {
            System.out.println("\t\tpermissions: WRITE for " + g.getName());
        }
        return;
    }

    // remove the default policy
    AuthorizeManager.removeAllPolicies(c, bs);

    // add the policy
    ResourcePolicy rp = ResourcePolicy.create(c);

    rp.setResource(bs);
    rp.setAction(actionID);
    rp.setGroup(g);
    rp.update();
}
// XML utility methods
/**
 * Lookup an attribute from a DOM node.
 *
 * @param n the node whose attributes to search
 * @param name the attribute name to look for
 * @return the attribute's value, or "" (never null) if absent
 */
private String getAttributeValue(Node n, String name)
{
    NamedNodeMap attributes = n.getAttributes();
    int count = attributes.getLength();

    for (int idx = 0; idx < count; idx++)
    {
        Node attr = attributes.item(idx);
        if (name.equals(attr.getNodeName()))
        {
            return attr.getNodeValue();
        }
    }

    // attribute not present: empty string, not null
    return "";
}
/**
 * Return the String value of a Node: the text of its first child when
 * that child is a TEXT node, otherwise the node's own value (which may
 * be null for element nodes).
 *
 * @param node the node to read
 * @return the node's text, possibly null
 */
private String getStringValue(Node node)
{
    if (node.hasChildNodes())
    {
        Node first = node.getFirstChild();
        if (first.getNodeType() == Node.TEXT_NODE)
        {
            return first.getNodeValue();
        }
    }

    return node.getNodeValue();
}
/**
 * Load in the XML from file.
 *
 * @param filename
 *            the filename to load from
 *
 * @return the DOM representation of the XML file
 * @throws IOException if the file cannot be read
 * @throws ParserConfigurationException if no DOM parser can be configured
 * @throws SAXException if the file is not well-formed XML
 */
private static Document loadXML(String filename) throws IOException,
    ParserConfigurationException, SAXException
{
    // NOTE(review): the factory is used with default settings, so DTDs
    // and external entities are processed (potential XXE). Import files
    // are administrator-supplied, but consider disabling external
    // entities / enabling secure processing -- confirm.
    DocumentBuilder builder = DocumentBuilderFactory.newInstance()
            .newDocumentBuilder();

    return builder.parse(new File(filename));
}
/**
 * Delete a directory and its child files and directories.
 *
 * @param path The directory to delete
 * @return Whether the deletion was successful or not
 */
private static boolean deleteDirectory(File path)
{
    if (path.exists())
    {
        File[] files = path.listFiles();
        // listFiles() returns null on an I/O error (not an empty array);
        // previously that caused a NullPointerException
        if (files != null)
        {
            for (int i = 0; i < files.length; i++)
            {
                if (files[i].isDirectory())
                {
                    // log recursive failures too; previously the return
                    // value of the recursive call was silently ignored
                    if (!deleteDirectory(files[i]))
                    {
                        log.error("Unable to delete directory: " + files[i].getName());
                    }
                }
                else
                {
                    if (!files[i].delete())
                    {
                        log.error("Unable to delete file: " + files[i].getName());
                    }
                }
            }
        }
    }

    // delete() on a non-existent or non-empty path simply returns false
    boolean pathDeleted = path.delete();
    return (pathDeleted);
}
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.util;
import java.util.Map;
import java.util.Set;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Enumeration;
import java.sql.SQLException;
import org.apache.log4j.Logger;
import org.dspace.authorize.AuthorizeManager;
import org.dspace.core.ConfigurationManager;
import org.dspace.core.Context;
/**
* Static utility class to manage configuration for exposure (hiding) of
* certain Item metadata fields.
*
* This class answers the question, "is the user allowed to see this
* metadata field?" Any external interface (UI, OAI-PMH, etc) that
* disseminates metadata should consult it before disseminating the value
* of a metadata field.
*
* Since the MetadataExposure.isHidden() method gets called in a lot of inner
* loops, it is important to implement it efficiently, in both time and
* memory utilization. It computes an answer without consuming ANY memory
* (e.g. it does not build any temporary Strings) and in close to constant
* time by use of hash tables. Although most sites will only hide a few
* fields, we can't predict what the usage will be so it's better to make it
* scalable.
*
* Algorithm is as follows:
* 1. If a Context is provided and it has a user who is Administrator,
* always grant access (return false).
* 2. Return true if field is on the hidden list, false otherwise.
*
* The internal maps are populated from DSpace Configuration at the first
* call, in case the properties are not available in the static context.
*
* Configuration Properties:
* ## hide a single metadata field
* #metadata.hide.SCHEMA.ELEMENT[.QUALIFIER] = true
* # example: dc.type
* metadata.hide.dc.type = true
* # example: dc.description.provenance
* metadata.hide.dc.description.provenance = true
*
* @author Larry Stone
* @version $Revision: 3734 $
*/
public class MetadataExposure
{
    private static Logger log = Logger.getLogger(MetadataExposure.class);

    // schema -> set of hidden unqualified element names.
    // Both maps are built once by init() and only read afterwards. They
    // are volatile so that the fully-populated maps are safely published
    // to threads that call isHidden() without synchronization; previously
    // the fields were plain and assigned BEFORE being populated, so a
    // concurrent reader could observe a partially-built (or null) map.
    private static volatile Map<String,Set<String>> hiddenElementSets = null;

    // schema -> element -> set of hidden qualifiers
    private static volatile Map<String,Map<String,Set<String>>> hiddenElementMaps = null;

    private static final String CONFIG_PREFIX = "metadata.hide.";

    /**
     * Answers whether the given metadata field should be hidden from the
     * current user. Administrators always see everything.
     *
     * @param context current context (may be null; then no admin override)
     * @param schema metadata schema name, e.g. "dc"
     * @param element metadata element name
     * @param qualifier metadata qualifier, or null for unqualified
     * @return true if the field is configured hidden for this user
     */
    public static boolean isHidden(Context context, String schema, String element, String qualifier)
        throws SQLException
    {
        // the administrator's override
        if (context != null && AuthorizeManager.isAdmin(context))
        {
            return false;
        }

        if (!isInitialized())
        {
            init();
        }

        // for schema.element, just check schema->elementSet
        if (qualifier == null)
        {
            Set<String> elts = hiddenElementSets.get(schema);
            return elts == null ? false : elts.contains(element);
        }
        // for schema.element.qualifier, just schema->eltMap->qualSet
        else
        {
            Map<String,Set<String>> elts = hiddenElementMaps.get(schema);
            if (elts == null)
            {
                return false;
            }
            Set<String> quals = elts.get(element);
            return quals == null ? false : quals.contains(qualifier);
        }
    }

    private static boolean isInitialized()
    {
        // check both maps; previously only hiddenElementSets was tested,
        // so a reader could see it non-null while hiddenElementMaps was
        // still null
        return hiddenElementSets != null && hiddenElementMaps != null;
    }

    // load maps from configuration unless it's already done.
    private static synchronized void init()
    {
        if (!isInitialized())
        {
            // build into locals first, then publish, so unsynchronized
            // readers never observe a partially-populated map
            Map<String,Set<String>> elementSets = new HashMap<String,Set<String>>();
            Map<String,Map<String,Set<String>>> elementMaps = new HashMap<String,Map<String,Set<String>>>();

            Enumeration pne = ConfigurationManager.propertyNames();
            while (pne.hasMoreElements())
            {
                String key = (String)pne.nextElement();
                if (key.startsWith(CONFIG_PREFIX))
                {
                    String mdField = key.substring(CONFIG_PREFIX.length());
                    String segment[] = mdField.split("\\.", 3);

                    // got schema.element.qualifier
                    if (segment.length == 3)
                    {
                        Map<String,Set<String>> eltMap = elementMaps.get(segment[0]);
                        if (eltMap == null)
                        {
                            eltMap = new HashMap<String,Set<String>>();
                            elementMaps.put(segment[0], eltMap);
                        }
                        if (!eltMap.containsKey(segment[1]))
                        {
                            eltMap.put(segment[1], new HashSet<String>());
                        }
                        eltMap.get(segment[1]).add(segment[2]);
                    }

                    // got schema.element
                    else if (segment.length == 2)
                    {
                        if (!elementSets.containsKey(segment[0]))
                        {
                            elementSets.put(segment[0], new HashSet<String>());
                        }
                        elementSets.get(segment[0]).add(segment[1]);
                    }

                    // oops..
                    else
                    {
                        log.warn("Bad format in hidden metadata directive, field=\""+mdField+"\", config property="+key);
                    }
                }
            }

            // publish fully-built maps; writes to the volatile fields
            // establish happens-before for subsequent readers
            hiddenElementMaps = elementMaps;
            hiddenElementSets = elementSets;
        }
    }
}
| Java |
/**
* The contents of this file are subject to the license and copyright
* detailed in the LICENSE and NOTICE files at the root of the source
* tree and available online at
*
* http://www.dspace.org/license/
*/
package org.dspace.app.util;
import java.io.IOException;
import java.io.InputStream;
import java.text.DecimalFormat;
import java.text.NumberFormat;
import java.util.Enumeration;
import java.util.Locale;
import java.util.Properties;
import javax.servlet.http.HttpServletRequest;
import org.apache.log4j.Logger;
import org.dspace.core.Constants;
/**
* Miscellaneous utility methods
*
* @author Robert Tansley
* @author Mark Diggory
* @version $Revision: 5844 $
*/
public class Util {
    // cache for source version result (loaded once, then reused)
    private static String sourceVersion = null;

    private static Logger log = Logger.getLogger(Util.class);

    /**
     * Percent-encoded form ("%00" .. "%ff") of every byte value, indexed by
     * the unsigned byte value.  Built once here instead of re-creating a
     * 256-element array on every encodeBitstreamName() call.
     */
    private static final String[] PCT_ENCODING = new String[256];
    static {
        for (int i = 0; i < PCT_ENCODING.length; i++)
        {
            PCT_ENCODING[i] = String.format("%%%02x", i);
        }
    }

    /**
     * Utility method to convert spaces in a string to HTML non-break space
     * elements.
     *
     * @param s
     *            string to change spaces in
     * @return the string passed in with spaces converted to HTML non-break
     *         spaces
     */
    public static String nonBreakSpace(String s) {
        // StringBuilder: no synchronization needed for a method-local buffer.
        StringBuilder newString = new StringBuilder(s.length());

        for (int i = 0; i < s.length(); i++)
        {
            char ch = s.charAt(i);

            if (ch == ' ')
            {
                // Emit the HTML entity; appending a literal space here would
                // make the conversion a no-op.
                newString.append("&nbsp;");
            }
            else
            {
                newString.append(ch);
            }
        }

        return newString.toString();
    }

    /**
     * Encode a bitstream name for inclusion in a URL in an HTML document. This
     * differs from the usual URL-encoding, since we want pathname separators to
     * be passed through verbatim; this is required so that relative paths in
     * bitstream names and HTML references work correctly.
     * <P>
     * If the link to a bitstream is generated with the pathname separators
     * escaped (e.g. "%2F" instead of "/") then the Web user agent perceives it
     * to be one pathname element, and relative URI paths within that document
     * containing ".." elements will be handled incorrectly.
     * <P>
     *
     * @param stringIn
     *            input string to encode
     * @param encoding
     *            character encoding, e.g. UTF-8
     * @return the encoded string
     * @throws java.io.UnsupportedEncodingException
     *             if the named encoding is not supported
     */
    public static String encodeBitstreamName(String stringIn, String encoding) throws java.io.UnsupportedEncodingException {
        // FIXME: This should be moved elsewhere, as it is used outside the UI
        StringBuilder out = new StringBuilder();

        byte[] bytes = stringIn.getBytes(encoding);

        for (int i = 0; i < bytes.length; i++)
        {
            byte b = bytes[i];

            // Any unreserved char or "/" goes through unencoded; "/" is kept
            // verbatim so relative paths in bitstream names keep working.
            if ((b >= 'A' && b <= 'Z')
                    || (b >= 'a' && b <= 'z')
                    || (b >= '0' && b <= '9')
                    || b == '-' || b == '.' || b == '_' || b == '~'
                    || b == '/')
            {
                out.append((char) b);
            }
            else
            {
                // Percent-encode everything else; "& 0xff" maps Java's signed
                // byte onto the 0-255 table index (replaces the old
                // >= 0 / + 256 two-branch handling).
                out.append(PCT_ENCODING[b & 0xff]);
            }
        }

        log.debug("encoded \"" + stringIn + "\" to \"" + out.toString() + "\"");

        return out.toString();
    }

    /** Version of encodeBitstreamName with one parameter, uses default encoding
     * <P>
     * @param stringIn
     *            input string to encode
     * @return the encoded string
     * @throws java.io.UnsupportedEncodingException
     *             if the default encoding is not supported
     */
    public static String encodeBitstreamName(String stringIn) throws java.io.UnsupportedEncodingException {
        return encodeBitstreamName(stringIn, Constants.DEFAULT_ENCODING);
    }

    /**
     * Formats the file size. Examples:
     *
     *  - 50 = 50B
     *  - 1024 = 1KB
     *  - 1,024,000 = 1MB etc
     *
     * The numbers are formatted using java Locales.
     *
     * NOTE: the historical thresholds (1024, 1024000, 1024000000) are not
     * strict powers of 1024; they are kept unchanged so that displayed
     * values stay stable for existing callers.
     *
     * @param in The number to convert
     * @return the file size as a String
     */
    public static String formatFileSize(double in) {
        // FIXME: When full i18n support is available, use the user's Locale
        // rather than the default Locale.
        NumberFormat nf = NumberFormat.getNumberInstance(Locale.getDefault());
        DecimalFormat df = (DecimalFormat) nf;
        df.applyPattern("###,###.##");

        if (in < 1024)
        {
            // Whole bytes: no fractional part.
            df.applyPattern("0");
            return df.format(in) + " " + "B";
        }
        else if (in < 1024000)
        {
            return df.format(in / 1024) + " " + "kB";
        }
        else if (in < 1024000000)
        {
            return df.format(in / 1024000) + " " + "MB";
        }
        else
        {
            return df.format(in / 1024000000) + " " + "GB";
        }
    }

    /**
     * Obtain a parameter from the given request as an int. <code>-1</code> is
     * returned if the parameter is garbled or does not exist.
     *
     * @param request
     *            the HTTP request
     * @param param
     *            the name of the parameter
     *
     * @return the integer value of the parameter, or -1
     */
    public static int getIntParameter(HttpServletRequest request, String param)
    {
        String val = request.getParameter(param);

        // Explicit missing-parameter check: the old code relied on
        // Integer.parseInt(null.trim()) throwing NPE into a broad catch.
        if (val == null)
        {
            return -1;
        }

        try
        {
            return Integer.parseInt(val.trim());
        }
        catch (NumberFormatException e)
        {
            // Garbled parameter value
            return -1;
        }
    }

    /**
     * Obtain an array of int parameters from the given request as an int. null
     * is returned if parameter doesn't exist. <code>-1</code> is returned in
     * array locations if that particular value is garbled.
     *
     * @param request
     *            the HTTP request
     * @param param
     *            the name of the parameter
     *
     * @return array of integers or null
     */
    public static int[] getIntParameters(HttpServletRequest request,
            String param)
    {
        String[] requestValues = request.getParameterValues(param);

        if (requestValues == null)
        {
            return null;
        }

        int[] returnValues = new int[requestValues.length];

        for (int x = 0; x < returnValues.length; x++)
        {
            try
            {
                returnValues[x] = Integer.parseInt(requestValues[x]);
            }
            catch (NumberFormatException e)
            {
                // Problem with this value: stuff -1 in this slot.
                // (parseInt(null) also throws NumberFormatException, so no
                // separate null check is needed.)
                returnValues[x] = -1;
            }
        }

        return returnValues;
    }

    /**
     * Obtain a parameter from the given request as a boolean.
     * <code>false</code> is returned if the parameter is garbled or does not
     * exist.
     *
     * @param request
     *            the HTTP request
     * @param param
     *            the name of the parameter
     *
     * @return true if the parameter's value is exactly "true", false otherwise
     */
    public static boolean getBoolParameter(HttpServletRequest request,
            String param)
    {
        // "true".equals(...) is null-safe and reads the parameter only once.
        return "true".equals(request.getParameter(param));
    }

    /**
     * Get the button the user pressed on a submitted form. All buttons should
     * start with the text <code>submit</code> for this to work. A default
     * should be supplied, since often the browser will submit a form with no
     * submit button pressed if the user presses enter.
     *
     * @param request
     *            the HTTP request
     * @param def
     *            the default button
     *
     * @return the button pressed
     */
    public static String getSubmitButton(HttpServletRequest request, String def)
    {
        Enumeration e = request.getParameterNames();

        while (e.hasMoreElements())
        {
            String parameterName = (String) e.nextElement();

            if (parameterName.startsWith("submit"))
            {
                return parameterName;
            }
        }

        return def;
    }

    /**
     * Gets Maven version string of the source that built this instance.
     *
     * @return string containing version, e.g. "1.5.2"; ends in "-SNAPSHOT" for
     *         development versions; "none" if the version cannot be determined.
     */
    public static String getSourceVersion()
    {
        if (sourceVersion == null)
        {
            Properties constants = new Properties();

            InputStream cis = Util.class.getResourceAsStream(
                    "/META-INF/maven/org.dspace/dspace-api/pom.properties");

            if (cis == null)
            {
                // No pom.properties on the classpath (e.g. not built by
                // Maven); the old code passed null to Properties.load() and
                // relied on the resulting NPE being caught.
                log.warn("pom.properties not found; source version unknown");
            }
            else
            {
                try
                {
                    constants.load(cis);
                }
                catch (Exception e)
                {
                    log.error(e.getMessage(), e);
                }
                finally
                {
                    try
                    {
                        cis.close();
                    }
                    catch (IOException e)
                    {
                        log.error("Unable to close input stream", e);
                    }
                }
            }

            sourceVersion = constants.getProperty("version", "none");
        }

        return sourceVersion;
    }
}
| Java |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.