/*
 * IndexTask.java
 */
package org.ngbw.utils;


import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.concurrent.Future;
import java.util.zip.GZIPInputStream;

import org.apache.lucene.analysis.KeywordAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;


/**
 * Base class for dataset indexing tasks. Each task builds two Lucene indexes
 * for a dataset: a field index of searchable metadata and a source index that
 * stores the complete text of every record.
 *
 * @author Paul Hoover
 *
 */
abstract class IndexTask implements Task {

	// writers for the searchable field index and the stored source index
	private final IndexWriter m_fieldWriter;
	private final IndexWriter m_sourceWriter;
	protected final String m_dataset;
	protected final String m_dataDir;
	protected final String m_indexDir;
	protected final String m_fileName;


	protected IndexTask(String dataset, String dataDir, String indexDir, String fileName) throws IOException
	{
		m_dataset = dataset;
		m_dataDir = dataDir;
		m_indexDir = indexDir;
		m_fileName = fileName;

		String separator = System.getProperty("file.separator");
		String fieldDir = m_indexDir + separator + m_dataset + separator + "FIELD_INDEX";
		String sourceDir = m_indexDir + separator + m_dataset + separator + "SOURCE_INDEX";

		// the third constructor argument recreates each index from scratch;
		// the field index tokenizes text with a StandardAnalyzer, while the
		// source index holds only untokenized content
		m_fieldWriter = new IndexWriter(fieldDir, new StandardAnalyzer(), true);
		m_sourceWriter = new IndexWriter(sourceDir, new KeywordAnalyzer(), true);
	}

	public void run()
	{
		try {
			String separator = System.getProperty("file.separator");
			int offset = m_fileName.lastIndexOf(separator);
			String baseDir;
			String fileName;

			if (offset != -1) {
				baseDir = m_dataDir + separator + m_fileName.substring(0, offset);
				fileName = m_fileName.substring(offset + 1);
			}
			else {
				baseDir = m_dataDir;
				fileName = m_fileName;
			}

			// the file name may contain wildcards, so expand it against the data directory
			String[] names = (new File(baseDir)).list(new WildcardFilter(fileName));

			for (int i = 0 ; i < names.length ; i += 1) {
				String name = baseDir + separator + names[i];
				FileInputStream fileStream = new FileInputStream(name);
				InputStream inStream;

				// try the file as gzip first, falling back to an uncompressed
				// stream if the gzip header check fails; close the first
				// stream on failure so the descriptor isn't leaked
				try {
					inStream = new GZIPInputStream(fileStream);
				}
				catch (IOException ioErr) {
					fileStream.close();

					inStream = new FileInputStream(name);
				}

				try {
					addRecords(inStream);
				}
				finally {
					inStream.close();
				}
			}

			optimizeIndex();
			closeIndex();
		}
		catch (Exception err) {
			err.printStackTrace(System.err);
		}
	}

	public void runInChildProcess()
	{
		try {
			// close the writers created by the constructor so the child
			// process can acquire the index write locks
			closeIndex();

			String separator = System.getProperty("file.separator");
			String jvmPathName = System.getProperty("java.home") + separator + "bin" + separator + "java";
			String javaClassPath = System.getProperty("java.class.path");
			String className = this.getClass().getName();
			String[] commandArray = new String[10];

			commandArray[0] = jvmPathName;
			commandArray[1] = "-Xms1024m";
			commandArray[2] = "-Xmx8192m";
			commandArray[3] = "-cp";
			commandArray[4] = javaClassPath;
			commandArray[5] = className;
			commandArray[6] = m_dataset;
			commandArray[7] = m_dataDir;
			commandArray[8] = m_indexDir;
			commandArray[9] = m_fileName;

			Process indexJob = Runtime.getRuntime().exec(commandArray);

			// drain stdout and stderr concurrently so the child process
			// can't block on a full pipe buffer
			Future stdOut = InputStreamCollector.readInputStream(indexJob.getInputStream());
			Future stdErr = InputStreamCollector.readInputStream(indexJob.getErrorStream());
			int exitCode = indexJob.waitFor();

			System.out.print(stdOut.get());

			if (exitCode != 0)
				System.err.println("Indexing failed for dataset " + m_dataset);

			System.err.print(stdErr.get());
		}
		catch (Exception err) {
			err.printStackTrace(System.err);
		}
	}

	/**
	 * Parses records from the given stream and adds them to the indexes.
	 */
	protected abstract void addRecords(InputStream inStream) throws IOException;

	/**
	 * Indexes the searchable fields of a sequence record, then stores its
	 * complete source text.
	 */
	protected void addSequence(SequenceRecord record) throws IOException
	{
		Document newDoc = new Document();

		newDoc.add(new Field("RECORD_ID", record.primaryId, Field.Store.YES, Field.Index.UN_TOKENIZED));
		newDoc.add(new Field("PRIMARY_ID", record.primaryId, Field.Store.YES, Field.Index.UN_TOKENIZED));
		newDoc.add(new Field("ALTERNATIVE_ID", record.alternativeId, Field.Store.YES, Field.Index.UN_TOKENIZED));
		newDoc.add(new Field("NAME", record.name, Field.Store.YES, Field.Index.TOKENIZED));
		newDoc.add(new Field("ORGANISM", record.organism, Field.Store.YES, Field.Index.TOKENIZED));
		newDoc.add(new Field("VERSION", record.version, Field.Store.YES, Field.Index.UN_TOKENIZED));
		newDoc.add(new Field("SIMPLE_SEARCH", record.filteredSource, Field.Store.NO, Field.Index.TOKENIZED));
		newDoc.add(new Field("DATASET", m_dataset, Field.Store.YES, Field.Index.UN_TOKENIZED));

		m_fieldWriter.addDocument(newDoc);

		addSource(record);
	}

	/**
	 * Indexes the searchable fields of a structure record, then stores its
	 * complete source text.
	 */
	protected void addStructure(StructureRecord record) throws IOException
	{
		Document newDoc = new Document();

		newDoc.add(new Field("RECORD_ID", record.primaryId, Field.Store.YES, Field.Index.UN_TOKENIZED));
		newDoc.add(new Field("PRIMARY_ID", record.primaryId, Field.Store.YES, Field.Index.UN_TOKENIZED));
		newDoc.add(new Field("TYPE", record.type, Field.Store.YES, Field.Index.UN_TOKENIZED));
		newDoc.add(new Field("RESOLUTION", record.resolution, Field.Store.YES, Field.Index.UN_TOKENIZED));
		newDoc.add(new Field("NAME", record.name, Field.Store.YES, Field.Index.TOKENIZED));
		newDoc.add(new Field("AUTHOR", record.author, Field.Store.YES, Field.Index.TOKENIZED));
		newDoc.add(new Field("MOLECULE", record.molecule, Field.Store.YES, Field.Index.TOKENIZED));
		newDoc.add(new Field("ORGANISM", record.organism, Field.Store.YES, Field.Index.TOKENIZED));
		newDoc.add(new Field("DEPOSITION_DATE", record.depositionDate, Field.Store.YES, Field.Index.UN_TOKENIZED));
		newDoc.add(new Field("MODIFICATION_DATE", record.modificationDate, Field.Store.YES, Field.Index.TOKENIZED));
		newDoc.add(new Field("SIMPLE_SEARCH", record.filteredSource, Field.Store.NO, Field.Index.TOKENIZED));
		newDoc.add(new Field("DATASET", m_dataset, Field.Store.YES, Field.Index.UN_TOKENIZED));

		m_fieldWriter.addDocument(newDoc);

		addSource(record);
	}

	private void optimizeIndex() throws IOException
	{
		m_fieldWriter.optimize();
		m_sourceWriter.optimize();
	}

	private void closeIndex() throws IOException
	{
		m_fieldWriter.close();
		m_sourceWriter.close();
	}

	/**
	 * Stores the complete source text of a record in the source index,
	 * compressed and keyed by record id.
	 */
	private void addSource(Record record) throws IOException
	{
		Document newDoc = new Document();

		newDoc.add(new Field("RECORD_ID", record.primaryId, Field.Store.YES, Field.Index.UN_TOKENIZED));
		newDoc.add(new Field("RECORD_SOURCE", record.completeSource, Field.Store.COMPRESS, Field.Index.NO));

		m_sourceWriter.addDocument(newDoc);
	}
}
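
/*
 * A minimal sketch (not part of the original source) of what a concrete
 * subclass could look like. runInChildProcess() re-launches the subclass by
 * class name and passes dataset, dataDir, indexDir and fileName as program
 * arguments, so each concrete task is assumed to supply a matching main()
 * entry point. The class name FastaIndexTask and the parseRecords() helper
 * below are hypothetical; only addRecords(InputStream) is required.
 *
 *   class FastaIndexTask extends IndexTask {
 *
 *       FastaIndexTask(String dataset, String dataDir, String indexDir, String fileName) throws IOException {
 *           super(dataset, dataDir, indexDir, fileName);
 *       }
 *
 *       protected void addRecords(InputStream inStream) throws IOException {
 *           // parse the stream into SequenceRecord instances and index each one
 *           for (SequenceRecord record : parseRecords(inStream))
 *               addSequence(record);
 *       }
 *
 *       public static void main(String[] args) throws IOException {
 *           // argument order matches the commandArray built in runInChildProcess()
 *           new FastaIndexTask(args[0], args[1], args[2], args[3]).run();
 *       }
 *   }
 */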