Columns: buggy_function (string, lengths 1–391k), fixed_function (string, lengths 0–392k). Each row below pairs a buggy Java function with its fixed version.
public void testBasic() throws IOException { URI uri = dfsCluster.getURI(); Path lockPath = new Path(uri.toString(), "/lock"); HdfsLockFactory lockFactory = new HdfsLockFactory(lockPath, new Configuration()); Lock lock = lockFactory.makeLock("testlock"); boolean success = lock.obtain(); assertTrue("We could not get the lock when it should be available", success); success = lock.obtain(); assertFalse("We got the lock but it should be unavailble", success); lock.release(); success = lock.obtain(); assertTrue("We could not get the lock when it should be available", success); success = lock.obtain(); assertFalse("We got the lock but it should be unavailble", success); }
public void testBasic() throws IOException { URI uri = dfsCluster.getURI(); Path lockPath = new Path(uri.toString(), "/basedir/lock"); HdfsLockFactory lockFactory = new HdfsLockFactory(lockPath, new Configuration()); Lock lock = lockFactory.makeLock("testlock"); boolean success = lock.obtain(); assertTrue("We could not get the lock when it should be available", success); success = lock.obtain(); assertFalse("We got the lock but it should be unavailble", success); lock.release(); success = lock.obtain(); assertTrue("We could not get the lock when it should be available", success); success = lock.obtain(); assertFalse("We got the lock but it should be unavailble", success); }
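The fix above only changes the lock path from "/lock" to "/basedir/lock", so HdfsLockFactory is exercised against a parent directory that may not exist yet. For context on the Lucene Lock API the test uses, here is a minimal sketch of the usual obtain/release idiom with the work wrapped in try/finally; withLock is a hypothetical helper against a generic LockFactory, assuming the era's API where obtain() returns boolean and release() throws IOException, and is not code from this dataset.

    import java.io.IOException;
    import org.apache.lucene.store.Lock;
    import org.apache.lucene.store.LockFactory;

    public final class LockIdiom {
        // Obtain a named lock from any LockFactory, do some work, always release.
        public static void withLock(LockFactory factory, String name) throws IOException {
            Lock lock = factory.makeLock(name);
            if (!lock.obtain()) {
                throw new IOException("lock is already held: " + name);
            }
            try {
                // ... protected work ...
            } finally {
                lock.release();
            }
        }
    }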
private static final String WRITE_CONSISTENCY_LEVEL = "cassandra.consistencylevel.write"; private static final Logger logger = LoggerFactory.getLogger(ColumnFamilyInputFormat.class);
private static final String WRITE_CONSISTENCY_LEVEL = "cassandra.consistencylevel.write"; private static final Logger logger = LoggerFactory.getLogger(ConfigHelper.class);
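The only difference in this pair is the class literal passed to LoggerFactory.getLogger: the buggy version hands it ColumnFamilyInputFormat, so messages emitted by ConfigHelper are attributed to the wrong logger. A minimal sketch of the standard SLF4J idiom, using a hypothetical ConfigHelperExample class in place of the real one:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ConfigHelperExample { // hypothetical stand-in for ConfigHelper
        // Pass the declaring class itself so the logger name matches the class
        // that actually emits the messages.
        private static final Logger logger = LoggerFactory.getLogger(ConfigHelperExample.class);

        public void load() {
            logger.debug("attributed to ConfigHelperExample, not an unrelated class");
        }
    }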
public static org.apache.cassandra.db.migration.avro.CfDef convertToAvro(org.apache.cassandra.thrift.CfDef def) { org.apache.cassandra.db.migration.avro.CfDef newDef = new org.apache.cassandra.db.migration.avro.CfDef(); newDef.keyspace = def.getKeyspace(); newDef.name = def.getName(); newDef.column_type = def.getColumn_type(); newDef.comment = def.getComment(); newDef.comparator_type = def.getComparator_type(); newDef.default_validation_class = def.getDefault_validation_class(); newDef.key_validation_class = def.getKey_validation_class(); newDef.gc_grace_seconds = def.getGc_grace_seconds(); newDef.id = def.getId(); newDef.key_cache_save_period_in_seconds = def.getKey_cache_save_period_in_seconds(); newDef.key_cache_size = def.getKey_cache_size(); newDef.max_compaction_threshold = def.getMax_compaction_threshold(); newDef.memtable_flush_after_mins = def.getMemtable_flush_after_mins(); newDef.memtable_operations_in_millions = def.getMemtable_operations_in_millions(); newDef.memtable_throughput_in_mb = def.getMemtable_throughput_in_mb(); newDef.min_compaction_threshold = def.getMin_compaction_threshold(); newDef.read_repair_chance = def.getRead_repair_chance(); newDef.replicate_on_write = def.isReplicate_on_write(); newDef.row_cache_save_period_in_seconds = def.getRow_cache_save_period_in_seconds(); newDef.row_cache_size = def.getRow_cache_size(); newDef.subcomparator_type = def.getSubcomparator_type(); newDef.merge_shards_chance = def.getMerge_shards_chance(); List<org.apache.cassandra.db.migration.avro.ColumnDef> columnMeta = new ArrayList<org.apache.cassandra.db.migration.avro.ColumnDef>(); if (def.isSetColumn_metadata()) { for (org.apache.cassandra.thrift.ColumnDef cdef : def.getColumn_metadata()) { org.apache.cassandra.db.migration.avro.ColumnDef tdef = new org.apache.cassandra.db.migration.avro.ColumnDef(); tdef.name = ByteBufferUtil.clone(cdef.BufferForName()); tdef.validation_class = cdef.getValidation_class(); tdef.index_name = cdef.getIndex_name(); tdef.index_type = cdef.getIndex_type() == null ? null : org.apache.cassandra.db.migration.avro.IndexType.valueOf(cdef.getIndex_type().name()); columnMeta.add(tdef); } } newDef.column_metadata = columnMeta; return newDef; }
public static org.apache.cassandra.db.migration.avro.CfDef convertToAvro(org.apache.cassandra.thrift.CfDef def) { org.apache.cassandra.db.migration.avro.CfDef newDef = new org.apache.cassandra.db.migration.avro.CfDef(); newDef.keyspace = def.getKeyspace(); newDef.name = def.getName(); newDef.column_type = def.getColumn_type(); newDef.comment = def.getComment(); newDef.comparator_type = def.getComparator_type(); newDef.default_validation_class = def.getDefault_validation_class(); newDef.key_validation_class = def.getKey_validation_class(); newDef.gc_grace_seconds = def.getGc_grace_seconds(); newDef.id = def.getId(); newDef.key_cache_save_period_in_seconds = def.getKey_cache_save_period_in_seconds(); newDef.key_cache_size = def.getKey_cache_size(); newDef.max_compaction_threshold = def.getMax_compaction_threshold(); newDef.memtable_flush_after_mins = def.getMemtable_flush_after_mins(); newDef.memtable_operations_in_millions = def.getMemtable_operations_in_millions(); newDef.memtable_throughput_in_mb = def.getMemtable_throughput_in_mb(); newDef.min_compaction_threshold = def.getMin_compaction_threshold(); newDef.read_repair_chance = def.getRead_repair_chance(); newDef.replicate_on_write = def.isReplicate_on_write(); newDef.row_cache_save_period_in_seconds = def.getRow_cache_save_period_in_seconds(); newDef.row_cache_size = def.getRow_cache_size(); newDef.subcomparator_type = def.getSubcomparator_type(); newDef.merge_shards_chance = def.getMerge_shards_chance(); List<org.apache.cassandra.db.migration.avro.ColumnDef> columnMeta = new ArrayList<org.apache.cassandra.db.migration.avro.ColumnDef>(); if (def.isSetColumn_metadata()) { for (org.apache.cassandra.thrift.ColumnDef cdef : def.getColumn_metadata()) { org.apache.cassandra.db.migration.avro.ColumnDef tdef = new org.apache.cassandra.db.migration.avro.ColumnDef(); tdef.name = ByteBufferUtil.clone(cdef.bufferForName()); tdef.validation_class = cdef.getValidation_class(); tdef.index_name = cdef.getIndex_name(); tdef.index_type = cdef.getIndex_type() == null ? null : org.apache.cassandra.db.migration.avro.IndexType.valueOf(cdef.getIndex_type().name()); columnMeta.add(tdef); } } newDef.column_metadata = columnMeta; return newDef; }
public void reloadConfiguration() throws ConfigurationException { hostProperties = resourceToProperties(RACK_PROPERTY_FILENAME); invalidateCachedSnitchValues(); }
public void reloadConfiguration() throws ConfigurationException { hostProperties = resourceToProperties(RACK_PROPERTY_FILENAME); clearEndpointCache(); }
public ExecRow getNextRowCore() throws StandardException { if ( isOpen ) { if ( ! next ) { next = true; if (SanityManager.DEBUG) SanityManager.ASSERT(! cursor.isClosed(), "cursor closed"); ExecRow cursorRow = cursor.getCurrentRow(); // requalify the current row if (cursorRow == null) { throw StandardException.newException(SQLState.LANG_NO_CURRENT_ROW, cursorName); } // we know it will be requested, may as well get it now. rowLocation = cursor.getRowLocation(); // get the row from the base table, which is the real result // row for the CurrentOfResultSet currentRow = target.getCurrentRow(); // if the source result set is a ScrollInsensitiveResultSet, and // the current row has been deleted (while the cursor was // opened), the cursor result set (scroll insensitive) will // return the cached row, while the target result set will // return null (row has been deleted under owr feet). if (rowLocation == null || (cursorRow != null && currentRow == null)) { activation.addWarning(StandardException. newWarning(SQLState.CURSOR_OPERATION_CONFLICT)); return null; } /* beetle 3865: updateable cursor using index. If underlying is a covering * index, target is a TableScanRS (instead of a IndexRow2BaseRowRS) for the * index scan. But the problem is it returns a compact row in index key order. * However the ProjectRestrictRS above us that sets up the old and new column * values expects us to return a sparse row in heap order. We have to do the * wiring here, since we don't have IndexRow2BaseRowRS to do this work. This * problem was not exposed before, because we never used index scan for updateable * cursors. */ if (target instanceof TableScanResultSet) { TableScanResultSet scan = (TableScanResultSet) target; if (scan.indexCols != null && currentRow != null) currentRow = getSparseRow(currentRow, scan.indexCols); } /* If we are updating rows from cached RIDs, we should compare with forward-most * scan key when deciding whether to add RID to hash table or not. */ TableScanResultSet scan = (TableScanResultSet) activation.getForUpdateIndexScan(); if (scan != null) { if (target instanceof IndexRowToBaseRowResultSet) scan.compareToLastKey = ((IndexRowToBaseRowResultSet) target).currentRowPrescanned; else if (target instanceof TableScanResultSet) scan.compareToLastKey = ((TableScanResultSet) target).currentRowPrescanned; } // REMIND: verify the row is still there // at present we get an ugly exception from the store, // Hopefully someday we can just do this: // // if (!rowLocation.rowExists()) // throw StandardException.newException(SQLState.LANG_NO_CURRENT_ROW, cursorName); } else { currentRow = null; rowLocation = null; } } else { currentRow = null; rowLocation = null; } setCurrentRow(currentRow); return currentRow; }
public ExecRow getNextRowCore() throws StandardException { if ( isOpen ) { if ( ! next ) { next = true; if (SanityManager.DEBUG) SanityManager.ASSERT(! cursor.isClosed(), "cursor closed"); ExecRow cursorRow = cursor.getCurrentRow(); // requalify the current row if (cursorRow == null) { throw StandardException.newException(SQLState.NO_CURRENT_ROW); } // we know it will be requested, may as well get it now. rowLocation = cursor.getRowLocation(); // get the row from the base table, which is the real result // row for the CurrentOfResultSet currentRow = target.getCurrentRow(); // if the source result set is a ScrollInsensitiveResultSet, and // the current row has been deleted (while the cursor was // opened), the cursor result set (scroll insensitive) will // return the cached row, while the target result set will // return null (row has been deleted under owr feet). if (rowLocation == null || (cursorRow != null && currentRow == null)) { activation.addWarning(StandardException. newWarning(SQLState.CURSOR_OPERATION_CONFLICT)); return null; } /* beetle 3865: updateable cursor using index. If underlying is a covering * index, target is a TableScanRS (instead of a IndexRow2BaseRowRS) for the * index scan. But the problem is it returns a compact row in index key order. * However the ProjectRestrictRS above us that sets up the old and new column * values expects us to return a sparse row in heap order. We have to do the * wiring here, since we don't have IndexRow2BaseRowRS to do this work. This * problem was not exposed before, because we never used index scan for updateable * cursors. */ if (target instanceof TableScanResultSet) { TableScanResultSet scan = (TableScanResultSet) target; if (scan.indexCols != null && currentRow != null) currentRow = getSparseRow(currentRow, scan.indexCols); } /* If we are updating rows from cached RIDs, we should compare with forward-most * scan key when deciding whether to add RID to hash table or not. */ TableScanResultSet scan = (TableScanResultSet) activation.getForUpdateIndexScan(); if (scan != null) { if (target instanceof IndexRowToBaseRowResultSet) scan.compareToLastKey = ((IndexRowToBaseRowResultSet) target).currentRowPrescanned; else if (target instanceof TableScanResultSet) scan.compareToLastKey = ((TableScanResultSet) target).currentRowPrescanned; } // REMIND: verify the row is still there // at present we get an ugly exception from the store, // Hopefully someday we can just do this: // // if (!rowLocation.rowExists()) // throw StandardException.newException(SQLState.LANG_NO_CURRENT_ROW, cursorName); } else { currentRow = null; rowLocation = null; } } else { currentRow = null; rowLocation = null; } setCurrentRow(currentRow); return currentRow; }
private void testAntiCompaction(String columnFamilyName, int insertsPerTable) throws IOException, ExecutionException, InterruptedException { Table table = Table.open("Keyspace1"); ColumnFamilyStore store = table.getColumnFamilyStore(columnFamilyName); for (int j = 0; j < insertsPerTable; j++) { String key = String.valueOf(j); RowMutation rm = new RowMutation("Keyspace1", key); rm.add(new QueryPath(columnFamilyName, null, "0".getBytes()), new byte[0], j); rm.apply(); } store.forceBlockingFlush(); List<String> fileList = new ArrayList<String>(); List<Range> ranges = new ArrayList<Range>(); IPartitioner partitioner = new CollatingOrderPreservingPartitioner(); Range r = new Range(partitioner.getToken("0"), partitioner.getToken("zzzzzzz")); ranges.add(r); boolean result = store.doAntiCompaction(ranges, new EndPoint("127.0.0.1", 9150), fileList); assertEquals(true, result); // some keys should have qualified assertEquals(true, fileList.size() >= 3); //Data, index, filter files }
private void testAntiCompaction(String columnFamilyName, int insertsPerTable) throws IOException, ExecutionException, InterruptedException { Table table = Table.open("Keyspace1"); ColumnFamilyStore store = table.getColumnFamilyStore(columnFamilyName); for (int j = 0; j < insertsPerTable; j++) { String key = String.valueOf(j); RowMutation rm = new RowMutation("Keyspace1", key); rm.add(new QueryPath(columnFamilyName, null, "0".getBytes()), new byte[0], j); rm.apply(); } store.forceBlockingFlush(); List<String> fileList = new ArrayList<String>(); List<Range> ranges = new ArrayList<Range>(); IPartitioner partitioner = new CollatingOrderPreservingPartitioner(); Range r = new Range(partitioner.getToken("0"), partitioner.getToken("zzzzzzz")); ranges.add(r); boolean result = store.forceCompaction(ranges, new EndPoint("127.0.0.1", 9150), 0, fileList); assertEquals(true, result); // some keys should have qualified assertEquals(true, fileList.size() >= 3); //Data, index, filter files }
protected void testCompaction(int sstableCount, int rowsPerSSTable, int colsPerRow) throws Exception { CompactionManager.instance.disableAutoCompaction(); Table table = Table.open(TABLE1); ColumnFamilyStore store = table.getColumnFamilyStore("Standard1"); ArrayList<SSTableReader> sstables = new ArrayList<SSTableReader>(); for (int k = 0; k < sstableCount; k++) { SortedMap<String,ColumnFamily> rows = new TreeMap<String,ColumnFamily>(); for (int j = 0; j < rowsPerSSTable; j++) { String key = String.valueOf(j); IColumn[] cols = new IColumn[colsPerRow]; for (int i = 0; i < colsPerRow; i++) { // last sstable has highest timestamps cols[i] = Util.column(String.valueOf(i), String.valueOf(i), k); } rows.put(key, SSTableUtils.createCF(Long.MIN_VALUE, Integer.MIN_VALUE, cols)); } SSTableReader sstable = SSTableUtils.writeSSTable(rows); sstables.add(sstable); store.addSSTable(sstable); } // give garbage collection a bit of time to catch up Thread.sleep(1000); long start = System.currentTimeMillis(); CompactionManager.instance.doCompaction(store, sstables, (int) (System.currentTimeMillis() / 1000) - DatabaseDescriptor.getCFMetaData(TABLE1, "Standard1").getGcGraceSeconds()); System.out.println(String.format("%s: sstables=%d rowsper=%d colsper=%d: %d ms", this.getClass().getName(), sstableCount, rowsPerSSTable, colsPerRow, System.currentTimeMillis() - start)); }
protected void testCompaction(int sstableCount, int rowsPerSSTable, int colsPerRow) throws Exception { CompactionManager.instance.disableAutoCompaction(); Table table = Table.open(TABLE1); ColumnFamilyStore store = table.getColumnFamilyStore("Standard1"); ArrayList<SSTableReader> sstables = new ArrayList<SSTableReader>(); for (int k = 0; k < sstableCount; k++) { SortedMap<String,ColumnFamily> rows = new TreeMap<String,ColumnFamily>(); for (int j = 0; j < rowsPerSSTable; j++) { String key = String.valueOf(j); IColumn[] cols = new IColumn[colsPerRow]; for (int i = 0; i < colsPerRow; i++) { // last sstable has highest timestamps cols[i] = Util.column(String.valueOf(i), String.valueOf(i), k); } rows.put(key, SSTableUtils.createCF(Long.MIN_VALUE, Integer.MIN_VALUE, cols)); } SSTableReader sstable = SSTableUtils.prepare().write(rows); sstables.add(sstable); store.addSSTable(sstable); } // give garbage collection a bit of time to catch up Thread.sleep(1000); long start = System.currentTimeMillis(); CompactionManager.instance.doCompaction(store, sstables, (int) (System.currentTimeMillis() / 1000) - DatabaseDescriptor.getCFMetaData(TABLE1, "Standard1").getGcGraceSeconds()); System.out.println(String.format("%s: sstables=%d rowsper=%d colsper=%d: %d ms", this.getClass().getName(), sstableCount, rowsPerSSTable, colsPerRow, System.currentTimeMillis() - start)); }
public void testRecoverAndOpen() throws IOException, ExecutionException, InterruptedException { RowMutation rm; rm = new RowMutation("Keyspace1", ByteBufferUtil.bytes("k1")); rm.add(new QueryPath("Indexed1", null, ByteBufferUtil.bytes("birthdate")), FBUtilities.toByteBuffer(1L), 0); rm.apply(); ColumnFamily cf = ColumnFamily.create("Keyspace1", "Indexed1"); cf.addColumn(new Column(ByteBufferUtil.bytes("birthdate"), FBUtilities.toByteBuffer(1L), 0)); cf.addColumn(new Column(ByteBufferUtil.bytes("anydate"), FBUtilities.toByteBuffer(1L), 0)); Map<ByteBuffer, ByteBuffer> entries = new HashMap<ByteBuffer, ByteBuffer>(); DataOutputBuffer buffer = new DataOutputBuffer(); ColumnFamily.serializer().serializeWithIndexes(cf, buffer); entries.put(ByteBufferUtil.bytes("k2"), ByteBuffer.wrap(Arrays.copyOf(buffer.getData(), buffer.getLength()))); cf.clear(); cf.addColumn(new Column(ByteBufferUtil.bytes("anydate"), FBUtilities.toByteBuffer(1L), 0)); buffer = new DataOutputBuffer(); ColumnFamily.serializer().serializeWithIndexes(cf, buffer); entries.put(ByteBufferUtil.bytes("k3"), ByteBuffer.wrap(Arrays.copyOf(buffer.getData(), buffer.getLength()))); SSTableReader orig = SSTableUtils.writeRawSSTable("Keyspace1", "Indexed1", entries); // whack the index to trigger the recover FileUtils.deleteWithConfirm(orig.descriptor.filenameFor(Component.PRIMARY_INDEX)); FileUtils.deleteWithConfirm(orig.descriptor.filenameFor(Component.FILTER)); SSTableReader sstr = CompactionManager.instance.submitSSTableBuild(orig.descriptor, OperationType.AES).get(); assert sstr != null; ColumnFamilyStore cfs = Table.open("Keyspace1").getColumnFamilyStore("Indexed1"); cfs.addSSTable(sstr); cfs.buildSecondaryIndexes(cfs.getSSTables(), cfs.getIndexedColumns()); IndexExpression expr = new IndexExpression(ByteBufferUtil.bytes("birthdate"), IndexOperator.EQ, FBUtilities.toByteBuffer(1L)); IndexClause clause = new IndexClause(Arrays.asList(expr), FBUtilities.EMPTY_BYTE_BUFFER, 100); IFilter filter = new IdentityQueryFilter(); IPartitioner p = StorageService.getPartitioner(); Range range = new Range(p.getMinimumToken(), p.getMinimumToken()); List<Row> rows = cfs.scan(clause, range, filter); assertEquals("IndexExpression should return two rows on recoverAndOpen", 2, rows.size()); assertTrue("First result should be 'k1'",ByteBufferUtil.bytes("k1").equals(rows.get(0).key.key)); }
public void testRecoverAndOpen() throws IOException, ExecutionException, InterruptedException { RowMutation rm; rm = new RowMutation("Keyspace1", ByteBufferUtil.bytes("k1")); rm.add(new QueryPath("Indexed1", null, ByteBufferUtil.bytes("birthdate")), FBUtilities.toByteBuffer(1L), 0); rm.apply(); ColumnFamily cf = ColumnFamily.create("Keyspace1", "Indexed1"); cf.addColumn(new Column(ByteBufferUtil.bytes("birthdate"), FBUtilities.toByteBuffer(1L), 0)); cf.addColumn(new Column(ByteBufferUtil.bytes("anydate"), FBUtilities.toByteBuffer(1L), 0)); Map<ByteBuffer, ByteBuffer> entries = new HashMap<ByteBuffer, ByteBuffer>(); DataOutputBuffer buffer = new DataOutputBuffer(); ColumnFamily.serializer().serializeWithIndexes(cf, buffer); entries.put(ByteBufferUtil.bytes("k2"), ByteBuffer.wrap(Arrays.copyOf(buffer.getData(), buffer.getLength()))); cf.clear(); cf.addColumn(new Column(ByteBufferUtil.bytes("anydate"), FBUtilities.toByteBuffer(1L), 0)); buffer = new DataOutputBuffer(); ColumnFamily.serializer().serializeWithIndexes(cf, buffer); entries.put(ByteBufferUtil.bytes("k3"), ByteBuffer.wrap(Arrays.copyOf(buffer.getData(), buffer.getLength()))); SSTableReader orig = SSTableUtils.prepare().cf("Indexed1").writeRaw(entries); // whack the index to trigger the recover FileUtils.deleteWithConfirm(orig.descriptor.filenameFor(Component.PRIMARY_INDEX)); FileUtils.deleteWithConfirm(orig.descriptor.filenameFor(Component.FILTER)); SSTableReader sstr = CompactionManager.instance.submitSSTableBuild(orig.descriptor, OperationType.AES).get(); assert sstr != null; ColumnFamilyStore cfs = Table.open("Keyspace1").getColumnFamilyStore("Indexed1"); cfs.addSSTable(sstr); cfs.buildSecondaryIndexes(cfs.getSSTables(), cfs.getIndexedColumns()); IndexExpression expr = new IndexExpression(ByteBufferUtil.bytes("birthdate"), IndexOperator.EQ, FBUtilities.toByteBuffer(1L)); IndexClause clause = new IndexClause(Arrays.asList(expr), FBUtilities.EMPTY_BYTE_BUFFER, 100); IFilter filter = new IdentityQueryFilter(); IPartitioner p = StorageService.getPartitioner(); Range range = new Range(p.getMinimumToken(), p.getMinimumToken()); List<Row> rows = cfs.scan(clause, range, filter); assertEquals("IndexExpression should return two rows on recoverAndOpen", 2, rows.size()); assertTrue("First result should be 'k1'",ByteBufferUtil.bytes("k1").equals(rows.get(0).key.key)); }
private Collection<ScoreTerm> suggestSimilar(Term term, int numSug, IndexReader ir, int docfreq, int editDistance, float accuracy, final CharsRef spare) throws IOException { AttributeSource atts = new AttributeSource(); MaxNonCompetitiveBoostAttribute maxBoostAtt = atts.addAttribute(MaxNonCompetitiveBoostAttribute.class); Terms terms = MultiFields.getTerms(ir, term.field()); if (terms == null) { return Collections.emptyList(); } FuzzyTermsEnum e = new FuzzyTermsEnum(terms, atts, term, editDistance, Math.max(minPrefix, editDistance-1)); final PriorityQueue<ScoreTerm> stQueue = new PriorityQueue<ScoreTerm>(); BytesRef queryTerm = new BytesRef(term.text()); BytesRef candidateTerm; ScoreTerm st = new ScoreTerm(); BoostAttribute boostAtt = e.attributes().addAttribute(BoostAttribute.class); while ((candidateTerm = e.next()) != null) { final float boost = boostAtt.getBoost(); // ignore uncompetitive hits if (stQueue.size() >= numSug && boost <= stQueue.peek().boost) continue; // ignore exact match of the same term if (queryTerm.bytesEquals(candidateTerm)) continue; int df = e.docFreq(); // check docFreq if required if (df <= docfreq) continue; final float score; final String termAsString; if (distance == INTERNAL_LEVENSHTEIN) { // delay creating strings until the end termAsString = null; // undo FuzzyTermsEnum's scale factor for a real scaled lev score score = boost / e.getScaleFactor() + e.getMinSimilarity(); } else { termAsString = candidateTerm.utf8ToChars(spare).toString(); score = distance.getDistance(term.text(), termAsString); } if (score < accuracy) continue; // add new entry in PQ st.term = new BytesRef(candidateTerm); st.boost = boost; st.docfreq = df; st.termAsString = termAsString; st.score = score; stQueue.offer(st); // possibly drop entries from queue st = (stQueue.size() > numSug) ? stQueue.poll() : new ScoreTerm(); maxBoostAtt.setMaxNonCompetitiveBoost((stQueue.size() >= numSug) ? stQueue.peek().boost : Float.NEGATIVE_INFINITY); } return stQueue; }
private Collection<ScoreTerm> suggestSimilar(Term term, int numSug, IndexReader ir, int docfreq, int editDistance, float accuracy, final CharsRef spare) throws IOException { AttributeSource atts = new AttributeSource(); MaxNonCompetitiveBoostAttribute maxBoostAtt = atts.addAttribute(MaxNonCompetitiveBoostAttribute.class); Terms terms = MultiFields.getTerms(ir, term.field()); if (terms == null) { return Collections.emptyList(); } FuzzyTermsEnum e = new FuzzyTermsEnum(terms, atts, term, editDistance, Math.max(minPrefix, editDistance-1)); final PriorityQueue<ScoreTerm> stQueue = new PriorityQueue<ScoreTerm>(); BytesRef queryTerm = new BytesRef(term.text()); BytesRef candidateTerm; ScoreTerm st = new ScoreTerm(); BoostAttribute boostAtt = e.attributes().addAttribute(BoostAttribute.class); while ((candidateTerm = e.next()) != null) { final float boost = boostAtt.getBoost(); // ignore uncompetitive hits if (stQueue.size() >= numSug && boost <= stQueue.peek().boost) continue; // ignore exact match of the same term if (queryTerm.bytesEquals(candidateTerm)) continue; int df = e.docFreq(); // check docFreq if required if (df <= docfreq) continue; final float score; final String termAsString; if (distance == INTERNAL_LEVENSHTEIN) { // delay creating strings until the end termAsString = null; // undo FuzzyTermsEnum's scale factor for a real scaled lev score score = boost / e.getScaleFactor() + e.getMinSimilarity(); } else { termAsString = candidateTerm.utf8ToChars(spare).toString(); score = distance.getDistance(term.text(), termAsString); } if (score < accuracy) continue; // add new entry in PQ st.term = BytesRef.deepCopyOf(candidateTerm); st.boost = boost; st.docfreq = df; st.termAsString = termAsString; st.score = score; stQueue.offer(st); // possibly drop entries from queue st = (stQueue.size() > numSug) ? stQueue.poll() : new ScoreTerm(); maxBoostAtt.setMaxNonCompetitiveBoost((stQueue.size() >= numSug) ? stQueue.peek().boost : Float.NEGATIVE_INFINITY); } return stQueue; }
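This pair, like most of the Lucene pairs that follow, swaps the BytesRef copy constructor for the static BytesRef.deepCopyOf wherever a term returned by the enum is kept beyond the current iteration: a TermsEnum may reuse its internal buffer on the next call to next(), so a stored term needs its own copy of the bytes. A self-contained sketch of the pattern (TermCollector is a hypothetical helper, not code from this dataset):

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import org.apache.lucene.index.TermsEnum;
    import org.apache.lucene.util.BytesRef;

    public final class TermCollector {
        // Collect every term from the enum; deepCopyOf copies the underlying
        // byte[], so stored values survive the enum overwriting its buffer.
        public static List<BytesRef> collect(TermsEnum te) throws IOException {
            List<BytesRef> out = new ArrayList<BytesRef>();
            BytesRef term;
            while ((term = te.next()) != null) {
                out.add(BytesRef.deepCopyOf(term));
            }
            return out;
        }
    }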
protected void add(int base, IndexReader r) throws IOException { Terms terms = r.terms(F_WORD); if (terms != null) termsEnums.add(terms.iterator(null)); } }.run(); } boolean isEmpty = termsEnums.isEmpty(); try { Iterator<String> iter = dict.getWordsIterator(); BytesRef currentTerm = new BytesRef(); terms: while (iter.hasNext()) { String word = iter.next(); int len = word.length(); if (len < 3) { continue; // too short we bail but "too long" is fine... } if (!isEmpty) { // we have a non-empty index, check if the term exists currentTerm.copy(word); for (TermsEnum te : termsEnums) { if (te.seekExact(currentTerm, false)) { continue terms; } } } // ok index the word Document doc = createDocument(word, getMin(len), getMax(len)); writer.addDocument(doc); } } finally { releaseSearcher(indexSearcher); } if (fullMerge) { writer.forceMerge(1); } // close writer writer.close(); // TODO: this isn't that great, maybe in the future SpellChecker should take // IWC in its ctor / keep its writer open? // also re-open the spell index to see our own changes when the next suggestion // is fetched: swapSearcher(dir); } }
protected void add(int base, IndexReader r) throws IOException { Terms terms = r.terms(F_WORD); if (terms != null) termsEnums.add(terms.iterator(null)); } }.run(); } boolean isEmpty = termsEnums.isEmpty(); try { Iterator<String> iter = dict.getWordsIterator(); BytesRef currentTerm = new BytesRef(); terms: while (iter.hasNext()) { String word = iter.next(); int len = word.length(); if (len < 3) { continue; // too short we bail but "too long" is fine... } if (!isEmpty) { // we have a non-empty index, check if the term exists currentTerm.copyChars(word); for (TermsEnum te : termsEnums) { if (te.seekExact(currentTerm, false)) { continue terms; } } } // ok index the word Document doc = createDocument(word, getMin(len), getMax(len)); writer.addDocument(doc); } } finally { releaseSearcher(indexSearcher); } if (fullMerge) { writer.forceMerge(1); } // close writer writer.close(); // TODO: this isn't that great, maybe in the future SpellChecker should take // IWC in its ctor / keep its writer open? // also re-open the spell index to see our own changes when the next suggestion // is fetched: swapSearcher(dir); } }
public Filter getFilter(Element e) throws ParserException { TermsFilter tf = new TermsFilter(); String text = DOMUtils.getNonBlankTextOrFail(e); String fieldName = DOMUtils.getAttributeWithInheritanceOrFail(e, "fieldName"); try { TokenStream ts = analyzer.tokenStream(fieldName, new StringReader(text)); TermToBytesRefAttribute termAtt = ts.addAttribute(TermToBytesRefAttribute.class); Term term = null; BytesRef bytes = termAtt.getBytesRef(); ts.reset(); while (ts.incrementToken()) { termAtt.fillBytesRef(); term = new Term(fieldName, new BytesRef(bytes)); tf.addTerm(term); } ts.end(); ts.close(); } catch (IOException ioe) { throw new RuntimeException("Error constructing terms from index:" + ioe); } return tf; }
public Filter getFilter(Element e) throws ParserException { TermsFilter tf = new TermsFilter(); String text = DOMUtils.getNonBlankTextOrFail(e); String fieldName = DOMUtils.getAttributeWithInheritanceOrFail(e, "fieldName"); try { TokenStream ts = analyzer.tokenStream(fieldName, new StringReader(text)); TermToBytesRefAttribute termAtt = ts.addAttribute(TermToBytesRefAttribute.class); Term term = null; BytesRef bytes = termAtt.getBytesRef(); ts.reset(); while (ts.incrementToken()) { termAtt.fillBytesRef(); term = new Term(fieldName, BytesRef.deepCopyOf(bytes)); tf.addTerm(term); } ts.end(); ts.close(); } catch (IOException ioe) { throw new RuntimeException("Error constructing terms from index:" + ioe); } return tf; }
public SpanQuery getSpanQuery(Element e) throws ParserException { String fieldName = DOMUtils.getAttributeWithInheritanceOrFail(e, "fieldName"); String value = DOMUtils.getNonBlankTextOrFail(e); try { List<SpanQuery> clausesList = new ArrayList<SpanQuery>(); TokenStream ts = analyzer.tokenStream(fieldName, new StringReader(value)); TermToBytesRefAttribute termAtt = ts.addAttribute(TermToBytesRefAttribute.class); BytesRef bytes = termAtt.getBytesRef(); ts.reset(); while (ts.incrementToken()) { termAtt.fillBytesRef(); SpanTermQuery stq = new SpanTermQuery(new Term(fieldName, new BytesRef(bytes))); clausesList.add(stq); } ts.end(); ts.close(); SpanOrQuery soq = new SpanOrQuery(clausesList.toArray(new SpanQuery[clausesList.size()])); soq.setBoost(DOMUtils.getAttribute(e, "boost", 1.0f)); return soq; } catch (IOException ioe) { throw new ParserException("IOException parsing value:" + value); } }
public SpanQuery getSpanQuery(Element e) throws ParserException { String fieldName = DOMUtils.getAttributeWithInheritanceOrFail(e, "fieldName"); String value = DOMUtils.getNonBlankTextOrFail(e); try { List<SpanQuery> clausesList = new ArrayList<SpanQuery>(); TokenStream ts = analyzer.tokenStream(fieldName, new StringReader(value)); TermToBytesRefAttribute termAtt = ts.addAttribute(TermToBytesRefAttribute.class); BytesRef bytes = termAtt.getBytesRef(); ts.reset(); while (ts.incrementToken()) { termAtt.fillBytesRef(); SpanTermQuery stq = new SpanTermQuery(new Term(fieldName, BytesRef.deepCopyOf(bytes))); clausesList.add(stq); } ts.end(); ts.close(); SpanOrQuery soq = new SpanOrQuery(clausesList.toArray(new SpanQuery[clausesList.size()])); soq.setBoost(DOMUtils.getAttribute(e, "boost", 1.0f)); return soq; } catch (IOException ioe) { throw new ParserException("IOException parsing value:" + value); } }
public Query getQuery(Element e) throws ParserException { String fieldName = DOMUtils.getAttributeWithInheritanceOrFail(e, "fieldName"); String text = DOMUtils.getNonBlankTextOrFail(e); BooleanQuery bq = new BooleanQuery(DOMUtils.getAttribute(e, "disableCoord", false)); bq.setMinimumNumberShouldMatch(DOMUtils.getAttribute(e, "minimumNumberShouldMatch", 0)); try { TokenStream ts = analyzer.tokenStream(fieldName, new StringReader(text)); TermToBytesRefAttribute termAtt = ts.addAttribute(TermToBytesRefAttribute.class); Term term = null; BytesRef bytes = termAtt.getBytesRef(); ts.reset(); while (ts.incrementToken()) { termAtt.fillBytesRef(); term = new Term(fieldName, new BytesRef(bytes)); bq.add(new BooleanClause(new TermQuery(term), BooleanClause.Occur.SHOULD)); } ts.end(); ts.close(); } catch (IOException ioe) { throw new RuntimeException("Error constructing terms from index:" + ioe); } bq.setBoost(DOMUtils.getAttribute(e, "boost", 1.0f)); return bq; }
public Query getQuery(Element e) throws ParserException { String fieldName = DOMUtils.getAttributeWithInheritanceOrFail(e, "fieldName"); String text = DOMUtils.getNonBlankTextOrFail(e); BooleanQuery bq = new BooleanQuery(DOMUtils.getAttribute(e, "disableCoord", false)); bq.setMinimumNumberShouldMatch(DOMUtils.getAttribute(e, "minimumNumberShouldMatch", 0)); try { TokenStream ts = analyzer.tokenStream(fieldName, new StringReader(text)); TermToBytesRefAttribute termAtt = ts.addAttribute(TermToBytesRefAttribute.class); Term term = null; BytesRef bytes = termAtt.getBytesRef(); ts.reset(); while (ts.incrementToken()) { termAtt.fillBytesRef(); term = new Term(fieldName, BytesRef.deepCopyOf(bytes)); bq.add(new BooleanClause(new TermQuery(term), BooleanClause.Occur.SHOULD)); } ts.end(); ts.close(); } catch (IOException ioe) { throw new RuntimeException("Error constructing terms from index:" + ioe); } bq.setBoost(DOMUtils.getAttribute(e, "boost", 1.0f)); return bq; }
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException { IndexReader reader = context.reader; FixedBitSet result = new FixedBitSet(reader.maxDoc()); Fields fields = reader.fields(); if (fields == null) { return result; } BytesRef br = new BytesRef(); String lastField = null; Terms termsC = null; TermsEnum termsEnum = null; DocsEnum docs = null; for (Term term : terms) { if (!term.field().equals(lastField)) { termsC = fields.terms(term.field()); if (termsC == null) { return result; } termsEnum = termsC.iterator(null); lastField = term.field(); } if (terms != null) { // TODO this check doesn't make sense, decide which variable its supposed to be for br.copy(term.bytes()); if (termsEnum.seekCeil(br) == TermsEnum.SeekStatus.FOUND) { docs = termsEnum.docs(acceptDocs, docs); while (docs.nextDoc() != DocsEnum.NO_MORE_DOCS) { result.set(docs.docID()); } } } } return result; }
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException { IndexReader reader = context.reader; FixedBitSet result = new FixedBitSet(reader.maxDoc()); Fields fields = reader.fields(); if (fields == null) { return result; } BytesRef br = new BytesRef(); String lastField = null; Terms termsC = null; TermsEnum termsEnum = null; DocsEnum docs = null; for (Term term : terms) { if (!term.field().equals(lastField)) { termsC = fields.terms(term.field()); if (termsC == null) { return result; } termsEnum = termsC.iterator(null); lastField = term.field(); } if (terms != null) { // TODO this check doesn't make sense, decide which variable its supposed to be for br.copyBytes(term.bytes()); if (termsEnum.seekCeil(br) == TermsEnum.SeekStatus.FOUND) { docs = termsEnum.docs(acceptDocs, docs); while (docs.nextDoc() != DocsEnum.NO_MORE_DOCS) { result.set(docs.docID()); } } } } return result; }
public boolean bytesVal(int doc, BytesRef target) { String s = strVal(doc); if (s==null) { target.length = 0; return false; } target.copy(s); return true; };
public boolean bytesVal(int doc, BytesRef target) { String s = strVal(doc); if (s==null) { target.length = 0; return false; } target.copyChars(s); return true; };
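Several of the surrounding pairs are mechanical renames from the old overloaded BytesRef/CharsRef copy(...) methods to the explicit copyChars(...) and copyBytes(...) variants: copyChars re-encodes text into the ref's byte (or char) buffer, while copyBytes copies raw bytes. A small sketch exercising both on a reused scratch BytesRef, assuming a Lucene version that already has the renamed methods (ScratchRefDemo is a hypothetical class):

    import org.apache.lucene.util.BytesRef;

    public final class ScratchRefDemo {
        public static void main(String[] args) {
            BytesRef scratch = new BytesRef();              // reusable scratch buffer
            scratch.copyChars("hello");                     // UTF-16 chars -> UTF-8 bytes
            BytesRef kept = BytesRef.deepCopyOf(scratch);   // independent copy of the bytes
            scratch.copyBytes(new BytesRef("world"));       // overwrite scratch with other bytes
            System.out.println(kept.utf8ToString() + " " + scratch.utf8ToString()); // hello world
        }
    }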
protected List<BytesRef> analyze(String text, String field, Analyzer analyzer) throws IOException { List<BytesRef> bytesRefs = new ArrayList<BytesRef>(); TokenStream tokenStream = analyzer.tokenStream(field, new StringReader(text)); TermToBytesRefAttribute termAttribute = tokenStream.getAttribute(TermToBytesRefAttribute.class); BytesRef bytesRef = termAttribute.getBytesRef(); while (tokenStream.incrementToken()) { termAttribute.fillBytesRef(); bytesRefs.add(new BytesRef(bytesRef)); } tokenStream.end(); tokenStream.close(); return bytesRefs; }
protected List<BytesRef> analyze(String text, String field, Analyzer analyzer) throws IOException { List<BytesRef> bytesRefs = new ArrayList<BytesRef>(); TokenStream tokenStream = analyzer.tokenStream(field, new StringReader(text)); TermToBytesRefAttribute termAttribute = tokenStream.getAttribute(TermToBytesRefAttribute.class); BytesRef bytesRef = termAttribute.getBytesRef(); while (tokenStream.incrementToken()) { termAttribute.fillBytesRef(); bytesRefs.add(BytesRef.deepCopyOf(bytesRef)); } tokenStream.end(); tokenStream.close(); return bytesRefs; }
private void addTerms(IndexReader reader,FieldVals f) throws IOException { if(f.queryString==null) return; TokenStream ts=analyzer.tokenStream(f.fieldName, new StringReader(f.queryString)); CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class); int corpusNumDocs=reader.numDocs(); HashSet<String> processedTerms=new HashSet<String>(); ts.reset(); while (ts.incrementToken()) { String term = termAtt.toString(); if(!processedTerms.contains(term)) { processedTerms.add(term); ScoreTermQueue variantsQ=new ScoreTermQueue(MAX_VARIANTS_PER_TERM); //maxNum variants considered for any one term float minScore=0; Term startTerm=new Term(f.fieldName, term); AttributeSource atts = new AttributeSource(); MaxNonCompetitiveBoostAttribute maxBoostAtt = atts.addAttribute(MaxNonCompetitiveBoostAttribute.class); FuzzyTermsEnum fe = new FuzzyTermsEnum(MultiFields.getTerms(reader, startTerm.field()), atts, startTerm, f.minSimilarity, f.prefixLength); //store the df so all variants use same idf int df = reader.docFreq(startTerm); int numVariants=0; int totalVariantDocFreqs=0; BytesRef possibleMatch; BoostAttribute boostAtt = fe.attributes().addAttribute(BoostAttribute.class); while ((possibleMatch = fe.next()) != null) { numVariants++; totalVariantDocFreqs+=fe.docFreq(); float score=boostAtt.getBoost(); if (variantsQ.size() < MAX_VARIANTS_PER_TERM || score > minScore){ ScoreTerm st=new ScoreTerm(new Term(startTerm.field(), new BytesRef(possibleMatch)),score,startTerm); variantsQ.insertWithOverflow(st); minScore = variantsQ.top().score; // maintain minScore } maxBoostAtt.setMaxNonCompetitiveBoost(variantsQ.size() >= MAX_VARIANTS_PER_TERM ? minScore : Float.NEGATIVE_INFINITY); } if(numVariants>0) { int avgDf=totalVariantDocFreqs/numVariants; if(df==0)//no direct match we can use as df for all variants { df=avgDf; //use avg df of all variants } // take the top variants (scored by edit distance) and reset the score // to include an IDF factor then add to the global queue for ranking // overall top query terms int size = variantsQ.size(); for(int i = 0; i < size; i++) { ScoreTerm st = variantsQ.pop(); st.score=(st.score*st.score)*sim.idf(df,corpusNumDocs); q.insertWithOverflow(st); } } } } ts.end(); ts.close(); }
private void addTerms(IndexReader reader,FieldVals f) throws IOException { if(f.queryString==null) return; TokenStream ts=analyzer.tokenStream(f.fieldName, new StringReader(f.queryString)); CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class); int corpusNumDocs=reader.numDocs(); HashSet<String> processedTerms=new HashSet<String>(); ts.reset(); while (ts.incrementToken()) { String term = termAtt.toString(); if(!processedTerms.contains(term)) { processedTerms.add(term); ScoreTermQueue variantsQ=new ScoreTermQueue(MAX_VARIANTS_PER_TERM); //maxNum variants considered for any one term float minScore=0; Term startTerm=new Term(f.fieldName, term); AttributeSource atts = new AttributeSource(); MaxNonCompetitiveBoostAttribute maxBoostAtt = atts.addAttribute(MaxNonCompetitiveBoostAttribute.class); FuzzyTermsEnum fe = new FuzzyTermsEnum(MultiFields.getTerms(reader, startTerm.field()), atts, startTerm, f.minSimilarity, f.prefixLength); //store the df so all variants use same idf int df = reader.docFreq(startTerm); int numVariants=0; int totalVariantDocFreqs=0; BytesRef possibleMatch; BoostAttribute boostAtt = fe.attributes().addAttribute(BoostAttribute.class); while ((possibleMatch = fe.next()) != null) { numVariants++; totalVariantDocFreqs+=fe.docFreq(); float score=boostAtt.getBoost(); if (variantsQ.size() < MAX_VARIANTS_PER_TERM || score > minScore){ ScoreTerm st=new ScoreTerm(new Term(startTerm.field(), BytesRef.deepCopyOf(possibleMatch)),score,startTerm); variantsQ.insertWithOverflow(st); minScore = variantsQ.top().score; // maintain minScore } maxBoostAtt.setMaxNonCompetitiveBoost(variantsQ.size() >= MAX_VARIANTS_PER_TERM ? minScore : Float.NEGATIVE_INFINITY); } if(numVariants>0) { int avgDf=totalVariantDocFreqs/numVariants; if(df==0)//no direct match we can use as df for all variants { df=avgDf; //use avg df of all variants } // take the top variants (scored by edit distance) and reset the score // to include an IDF factor then add to the global queue for ranking // overall top query terms int size = variantsQ.size(); for(int i = 0; i < size; i++) { ScoreTerm st = variantsQ.pop(); st.score=(st.score*st.score)*sim.idf(df,corpusNumDocs); q.insertWithOverflow(st); } } } } ts.end(); ts.close(); }
public void testCopy() { int numIters = atLeast(10); for (int i = 0; i < numIters; i++) { CharsRef ref = new CharsRef(); char[] charArray = _TestUtil.randomRealisticUnicodeString(random, 1, 100).toCharArray(); int offset = random.nextInt(charArray.length); int length = charArray.length - offset; String str = new String(charArray, offset, length); ref.copy(charArray, offset, length); assertEquals(str, ref.toString()); } }
public void testCopy() { int numIters = atLeast(10); for (int i = 0; i < numIters; i++) { CharsRef ref = new CharsRef(); char[] charArray = _TestUtil.randomRealisticUnicodeString(random, 1, 100).toCharArray(); int offset = random.nextInt(charArray.length); int length = charArray.length - offset; String str = new String(charArray, offset, length); ref.copyChars(charArray, offset, length); assertEquals(str, ref.toString()); } }
private int countTerms(MultiTermQuery q) throws Exception { final Terms terms = MultiFields.getTerms(reader, q.getField()); if (terms == null) return 0; final TermsEnum termEnum = q.getTermsEnum(terms); assertNotNull(termEnum); int count = 0; BytesRef cur, last = null; while ((cur = termEnum.next()) != null) { count++; if (last != null) { assertTrue(last.compareTo(cur) < 0); } last = new BytesRef(cur); } // LUCENE-3314: the results after next() already returned null are undefined, // assertNull(termEnum.next()); return count; }
private int countTerms(MultiTermQuery q) throws Exception { final Terms terms = MultiFields.getTerms(reader, q.getField()); if (terms == null) return 0; final TermsEnum termEnum = q.getTermsEnum(terms); assertNotNull(termEnum); int count = 0; BytesRef cur, last = null; while ((cur = termEnum.next()) != null) { count++; if (last != null) { assertTrue(last.compareTo(cur) < 0); } last = BytesRef.deepCopyOf(cur); } // LUCENE-3314: the results after next() already returned null are undefined, // assertNull(termEnum.next()); return count; }
public void testIntersect() throws Exception { for (int i = 0; i < numIterations; i++) { String reg = AutomatonTestUtil.randomRegexp(random); Automaton automaton = new RegExp(reg, RegExp.NONE).toAutomaton(); CompiledAutomaton ca = new CompiledAutomaton(automaton, SpecialOperations.isFinite(automaton), false); TermsEnum te = MultiFields.getTerms(reader, "field").intersect(ca, null); Automaton expected = BasicOperations.intersection(termsAutomaton, automaton); TreeSet<BytesRef> found = new TreeSet<BytesRef>(); while (te.next() != null) { found.add(new BytesRef(te.term())); } Automaton actual = DaciukMihovAutomatonBuilder.build(found); assertTrue(BasicOperations.sameLanguage(expected, actual)); } }
public void testIntersect() throws Exception { for (int i = 0; i < numIterations; i++) { String reg = AutomatonTestUtil.randomRegexp(random); Automaton automaton = new RegExp(reg, RegExp.NONE).toAutomaton(); CompiledAutomaton ca = new CompiledAutomaton(automaton, SpecialOperations.isFinite(automaton), false); TermsEnum te = MultiFields.getTerms(reader, "field").intersect(ca, null); Automaton expected = BasicOperations.intersection(termsAutomaton, automaton); TreeSet<BytesRef> found = new TreeSet<BytesRef>(); while (te.next() != null) { found.add(BytesRef.deepCopyOf(te.term())); } Automaton actual = DaciukMihovAutomatonBuilder.build(found); assertTrue(BasicOperations.sameLanguage(expected, actual)); } }
private void checkTermsOrder(IndexReader r, Set<String> allTerms, boolean isTop) throws IOException { TermsEnum terms = MultiFields.getFields(r).terms("f").iterator(null); BytesRef last = new BytesRef(); Set<String> seenTerms = new HashSet<String>(); while(true) { final BytesRef term = terms.next(); if (term == null) { break; } assertTrue(last.compareTo(term) < 0); last.copy(term); final String s = term.utf8ToString(); assertTrue("term " + termDesc(s) + " was not added to index (count=" + allTerms.size() + ")", allTerms.contains(s)); seenTerms.add(s); } if (isTop) { assertTrue(allTerms.equals(seenTerms)); } // Test seeking: Iterator<String> it = seenTerms.iterator(); while(it.hasNext()) { BytesRef tr = new BytesRef(it.next()); assertEquals("seek failed for term=" + termDesc(tr.utf8ToString()), TermsEnum.SeekStatus.FOUND, terms.seekCeil(tr)); } }
private void checkTermsOrder(IndexReader r, Set<String> allTerms, boolean isTop) throws IOException { TermsEnum terms = MultiFields.getFields(r).terms("f").iterator(null); BytesRef last = new BytesRef(); Set<String> seenTerms = new HashSet<String>(); while(true) { final BytesRef term = terms.next(); if (term == null) { break; } assertTrue(last.compareTo(term) < 0); last.copyBytes(term); final String s = term.utf8ToString(); assertTrue("term " + termDesc(s) + " was not added to index (count=" + allTerms.size() + ")", allTerms.contains(s)); seenTerms.add(s); } if (isTop) { assertTrue(allTerms.equals(seenTerms)); } // Test seeking: Iterator<String> it = seenTerms.iterator(); while(it.hasNext()) { BytesRef tr = new BytesRef(it.next()); assertEquals("seek failed for term=" + termDesc(tr.utf8ToString()), TermsEnum.SeekStatus.FOUND, terms.seekCeil(tr)); } }
public BytesRef next() throws IOException { if (queuedBottom != null) { bottomChanged(queuedBottom, false); queuedBottom = null; } BytesRef term = actualEnum.next(); boostAtt.setBoost(actualBoostAtt.getBoost()); final float bottom = maxBoostAtt.getMaxNonCompetitiveBoost(); final BytesRef bottomTerm = maxBoostAtt.getCompetitiveTerm(); if (term != null && (bottom != this.bottom || bottomTerm != this.bottomTerm)) { this.bottom = bottom; this.bottomTerm = bottomTerm; // clone the term before potentially doing something with it // this is a rare but wonderful occurrence anyway queuedBottom = new BytesRef(term); } return term; }
public BytesRef next() throws IOException { if (queuedBottom != null) { bottomChanged(queuedBottom, false); queuedBottom = null; } BytesRef term = actualEnum.next(); boostAtt.setBoost(actualBoostAtt.getBoost()); final float bottom = maxBoostAtt.getMaxNonCompetitiveBoost(); final BytesRef bottomTerm = maxBoostAtt.getCompetitiveTerm(); if (term != null && (bottom != this.bottom || bottomTerm != this.bottomTerm)) { this.bottom = bottom; this.bottomTerm = bottomTerm; // clone the term before potentially doing something with it // this is a rare but wonderful occurrence anyway queuedBottom = BytesRef.deepCopyOf(term); } return term; }
protected void uninvert(final IndexReader reader, final BytesRef termPrefix) throws IOException { //System.out.println("DTO uninvert field=" + field + " prefix=" + termPrefix); final long startTime = System.currentTimeMillis(); prefix = termPrefix == null ? null : new BytesRef(termPrefix); final int maxDoc = reader.maxDoc(); final int[] index = new int[maxDoc]; // immediate term numbers, or the index into the byte[] representing the last number final int[] lastTerm = new int[maxDoc]; // last term we saw for this document final byte[][] bytes = new byte[maxDoc][]; // list of term numbers for the doc (delta encoded vInts) final Terms terms = MultiFields.getTerms(reader, field); if (terms == null) { // No terms return; } final TermsEnum te = terms.iterator(null); final BytesRef seekStart = termPrefix != null ? termPrefix : new BytesRef(); //System.out.println("seekStart=" + seekStart.utf8ToString()); if (te.seekCeil(seekStart) == TermsEnum.SeekStatus.END) { // No terms match return; } // If we need our "term index wrapper", these will be // init'd below: List<BytesRef> indexedTerms = null; PagedBytes indexedTermsBytes = null; boolean testedOrd = false; final Bits liveDocs = MultiFields.getLiveDocs(reader); // we need a minimum of 9 bytes, but round up to 12 since the space would // be wasted with most allocators anyway. byte[] tempArr = new byte[12]; // // enumerate all terms, and build an intermediate form of the un-inverted field. // // During this intermediate form, every document has a (potential) byte[] // and the int[maxDoc()] array either contains the termNumber list directly // or the *end* offset of the termNumber list in it's byte array (for faster // appending and faster creation of the final form). // // idea... if things are too large while building, we could do a range of docs // at a time (but it would be a fair amount slower to build) // could also do ranges in parallel to take advantage of multiple CPUs // OPTIONAL: remap the largest df terms to the lowest 128 (single byte) // values. This requires going over the field first to find the most // frequent terms ahead of time. 
int termNum = 0; docsEnum = null; // Loop begins with te positioned to first term (we call // seek above): for (;;) { final BytesRef t = te.term(); if (t == null || (termPrefix != null && !t.startsWith(termPrefix))) { break; } //System.out.println("visit term=" + t.utf8ToString() + " " + t + " termNum=" + termNum); if (!testedOrd) { try { ordBase = (int) te.ord(); //System.out.println("got ordBase=" + ordBase); } catch (UnsupportedOperationException uoe) { // Reader cannot provide ord support, so we wrap // our own support by creating our own terms index: indexedTerms = new ArrayList<BytesRef>(); indexedTermsBytes = new PagedBytes(15); //System.out.println("NO ORDS"); } testedOrd = true; } visitTerm(te, termNum); if (indexedTerms != null && (termNum & indexIntervalMask) == 0) { // Index this term sizeOfIndexedStrings += t.length; BytesRef indexedTerm = new BytesRef(); indexedTermsBytes.copy(t, indexedTerm); // TODO: really should 1) strip off useless suffix, // and 2) use FST not array/PagedBytes indexedTerms.add(indexedTerm); } final int df = te.docFreq(); if (df <= maxTermDocFreq) { docsEnum = te.docs(liveDocs, docsEnum); final DocsEnum.BulkReadResult bulkResult = docsEnum.getBulkResult(); // dF, but takes deletions into account int actualDF = 0; for (;;) { int chunk = docsEnum.read(); if (chunk <= 0) { break; } //System.out.println(" chunk=" + chunk + " docs"); actualDF += chunk; for (int i=0; i<chunk; i++) { termInstances++; int doc = bulkResult.docs.ints[i]; //System.out.println(" docID=" + doc); // add TNUM_OFFSET to the term number to make room for special reserved values: // 0 (end term) and 1 (index into byte array follows) int delta = termNum - lastTerm[doc] + TNUM_OFFSET; lastTerm[doc] = termNum; int val = index[doc]; if ((val & 0xff)==1) { // index into byte array (actually the end of // the doc-specific byte[] when building) int pos = val >>> 8; int ilen = vIntSize(delta); byte[] arr = bytes[doc]; int newend = pos+ilen; if (newend > arr.length) { // We avoid a doubling strategy to lower memory usage. // this faceting method isn't for docs with many terms. // In hotspot, objects have 2 words of overhead, then fields, rounded up to a 64-bit boundary. // TODO: figure out what array lengths we can round up to w/o actually using more memory // (how much space does a byte[] take up? Is data preceded by a 32 bit length only? // It should be safe to round up to the nearest 32 bits in any case. int newLen = (newend + 3) & 0xfffffffc; // 4 byte alignment byte[] newarr = new byte[newLen]; System.arraycopy(arr, 0, newarr, 0, pos); arr = newarr; bytes[doc] = newarr; } pos = writeInt(delta, arr, pos); index[doc] = (pos<<8) | 1; // update pointer to end index in byte[] } else { // OK, this int has data in it... find the end (a zero starting byte - not // part of another number, hence not following a byte with the high bit set). int ipos; if (val==0) { ipos=0; } else if ((val & 0x0000ff80)==0) { ipos=1; } else if ((val & 0x00ff8000)==0) { ipos=2; } else if ((val & 0xff800000)==0) { ipos=3; } else { ipos=4; } //System.out.println(" ipos=" + ipos); int endPos = writeInt(delta, tempArr, ipos); //System.out.println(" endpos=" + endPos); if (endPos <= 4) { //System.out.println(" fits!"); // value will fit in the integer... move bytes back for (int j=ipos; j<endPos; j++) { val |= (tempArr[j] & 0xff) << (j<<3); } index[doc] = val; } else { // value won't fit... 
move integer into byte[] for (int j=0; j<ipos; j++) { tempArr[j] = (byte)val; val >>>=8; } // point at the end index in the byte[] index[doc] = (endPos<<8) | 1; bytes[doc] = tempArr; tempArr = new byte[12]; } } } } setActualDocFreq(termNum, actualDF); } termNum++; if (te.next() == null) { break; } } numTermsInField = termNum; long midPoint = System.currentTimeMillis(); if (termInstances == 0) { // we didn't invert anything // lower memory consumption. tnums = null; } else { this.index = index; // // transform intermediate form into the final form, building a single byte[] // at a time, and releasing the intermediate byte[]s as we go to avoid // increasing the memory footprint. // for (int pass = 0; pass<256; pass++) { byte[] target = tnums[pass]; int pos=0; // end in target; if (target != null) { pos = target.length; } else { target = new byte[4096]; } // loop over documents, 0x00ppxxxx, 0x01ppxxxx, 0x02ppxxxx // where pp is the pass (which array we are building), and xx is all values. // each pass shares the same byte[] for termNumber lists. for (int docbase = pass<<16; docbase<maxDoc; docbase+=(1<<24)) { int lim = Math.min(docbase + (1<<16), maxDoc); for (int doc=docbase; doc<lim; doc++) { //System.out.println(" pass=" + pass + " process docID=" + doc); int val = index[doc]; if ((val&0xff) == 1) { int len = val >>> 8; //System.out.println(" ptr pos=" + pos); index[doc] = (pos<<8)|1; // change index to point to start of array if ((pos & 0xff000000) != 0) { // we only have 24 bits for the array index throw new IllegalStateException("Too many values for UnInvertedField faceting on field "+field); } byte[] arr = bytes[doc]; /* for(byte b : arr) { //System.out.println(" b=" + Integer.toHexString((int) b)); } */ bytes[doc] = null; // IMPORTANT: allow GC to avoid OOM if (target.length <= pos + len) { int newlen = target.length; /*** we don't have to worry about the array getting too large * since the "pos" param will overflow first (only 24 bits available) if ((newlen<<1) <= 0) { // overflow... newlen = Integer.MAX_VALUE; if (newlen <= pos + len) { throw new SolrException(400,"Too many terms to uninvert field!"); } } else { while (newlen <= pos + len) newlen<<=1; // doubling strategy } ****/ while (newlen <= pos + len) newlen<<=1; // doubling strategy byte[] newtarget = new byte[newlen]; System.arraycopy(target, 0, newtarget, 0, pos); target = newtarget; } System.arraycopy(arr, 0, target, pos, len); pos += len + 1; // skip single byte at end and leave it 0 for terminator } } } // shrink array if (pos < target.length) { byte[] newtarget = new byte[pos]; System.arraycopy(target, 0, newtarget, 0, pos); target = newtarget; } tnums[pass] = target; if ((pass << 16) > maxDoc) break; } if (indexedTerms != null) { indexedTermsArray = indexedTerms.toArray(new BytesRef[indexedTerms.size()]); } } long endTime = System.currentTimeMillis(); total_time = (int)(endTime-startTime); phase1_time = (int)(midPoint-startTime); }
protected void uninvert(final IndexReader reader, final BytesRef termPrefix) throws IOException { //System.out.println("DTO uninvert field=" + field + " prefix=" + termPrefix); final long startTime = System.currentTimeMillis(); prefix = termPrefix == null ? null : BytesRef.deepCopyOf(termPrefix); final int maxDoc = reader.maxDoc(); final int[] index = new int[maxDoc]; // immediate term numbers, or the index into the byte[] representing the last number final int[] lastTerm = new int[maxDoc]; // last term we saw for this document final byte[][] bytes = new byte[maxDoc][]; // list of term numbers for the doc (delta encoded vInts) final Terms terms = MultiFields.getTerms(reader, field); if (terms == null) { // No terms return; } final TermsEnum te = terms.iterator(null); final BytesRef seekStart = termPrefix != null ? termPrefix : new BytesRef(); //System.out.println("seekStart=" + seekStart.utf8ToString()); if (te.seekCeil(seekStart) == TermsEnum.SeekStatus.END) { // No terms match return; } // If we need our "term index wrapper", these will be // init'd below: List<BytesRef> indexedTerms = null; PagedBytes indexedTermsBytes = null; boolean testedOrd = false; final Bits liveDocs = MultiFields.getLiveDocs(reader); // we need a minimum of 9 bytes, but round up to 12 since the space would // be wasted with most allocators anyway. byte[] tempArr = new byte[12]; // // enumerate all terms, and build an intermediate form of the un-inverted field. // // During this intermediate form, every document has a (potential) byte[] // and the int[maxDoc()] array either contains the termNumber list directly // or the *end* offset of the termNumber list in it's byte array (for faster // appending and faster creation of the final form). // // idea... if things are too large while building, we could do a range of docs // at a time (but it would be a fair amount slower to build) // could also do ranges in parallel to take advantage of multiple CPUs // OPTIONAL: remap the largest df terms to the lowest 128 (single byte) // values. This requires going over the field first to find the most // frequent terms ahead of time. 
int termNum = 0; docsEnum = null; // Loop begins with te positioned to first term (we call // seek above): for (;;) { final BytesRef t = te.term(); if (t == null || (termPrefix != null && !t.startsWith(termPrefix))) { break; } //System.out.println("visit term=" + t.utf8ToString() + " " + t + " termNum=" + termNum); if (!testedOrd) { try { ordBase = (int) te.ord(); //System.out.println("got ordBase=" + ordBase); } catch (UnsupportedOperationException uoe) { // Reader cannot provide ord support, so we wrap // our own support by creating our own terms index: indexedTerms = new ArrayList<BytesRef>(); indexedTermsBytes = new PagedBytes(15); //System.out.println("NO ORDS"); } testedOrd = true; } visitTerm(te, termNum); if (indexedTerms != null && (termNum & indexIntervalMask) == 0) { // Index this term sizeOfIndexedStrings += t.length; BytesRef indexedTerm = new BytesRef(); indexedTermsBytes.copy(t, indexedTerm); // TODO: really should 1) strip off useless suffix, // and 2) use FST not array/PagedBytes indexedTerms.add(indexedTerm); } final int df = te.docFreq(); if (df <= maxTermDocFreq) { docsEnum = te.docs(liveDocs, docsEnum); final DocsEnum.BulkReadResult bulkResult = docsEnum.getBulkResult(); // dF, but takes deletions into account int actualDF = 0; for (;;) { int chunk = docsEnum.read(); if (chunk <= 0) { break; } //System.out.println(" chunk=" + chunk + " docs"); actualDF += chunk; for (int i=0; i<chunk; i++) { termInstances++; int doc = bulkResult.docs.ints[i]; //System.out.println(" docID=" + doc); // add TNUM_OFFSET to the term number to make room for special reserved values: // 0 (end term) and 1 (index into byte array follows) int delta = termNum - lastTerm[doc] + TNUM_OFFSET; lastTerm[doc] = termNum; int val = index[doc]; if ((val & 0xff)==1) { // index into byte array (actually the end of // the doc-specific byte[] when building) int pos = val >>> 8; int ilen = vIntSize(delta); byte[] arr = bytes[doc]; int newend = pos+ilen; if (newend > arr.length) { // We avoid a doubling strategy to lower memory usage. // this faceting method isn't for docs with many terms. // In hotspot, objects have 2 words of overhead, then fields, rounded up to a 64-bit boundary. // TODO: figure out what array lengths we can round up to w/o actually using more memory // (how much space does a byte[] take up? Is data preceded by a 32 bit length only? // It should be safe to round up to the nearest 32 bits in any case. int newLen = (newend + 3) & 0xfffffffc; // 4 byte alignment byte[] newarr = new byte[newLen]; System.arraycopy(arr, 0, newarr, 0, pos); arr = newarr; bytes[doc] = newarr; } pos = writeInt(delta, arr, pos); index[doc] = (pos<<8) | 1; // update pointer to end index in byte[] } else { // OK, this int has data in it... find the end (a zero starting byte - not // part of another number, hence not following a byte with the high bit set). int ipos; if (val==0) { ipos=0; } else if ((val & 0x0000ff80)==0) { ipos=1; } else if ((val & 0x00ff8000)==0) { ipos=2; } else if ((val & 0xff800000)==0) { ipos=3; } else { ipos=4; } //System.out.println(" ipos=" + ipos); int endPos = writeInt(delta, tempArr, ipos); //System.out.println(" endpos=" + endPos); if (endPos <= 4) { //System.out.println(" fits!"); // value will fit in the integer... move bytes back for (int j=ipos; j<endPos; j++) { val |= (tempArr[j] & 0xff) << (j<<3); } index[doc] = val; } else { // value won't fit... 
move integer into byte[] for (int j=0; j<ipos; j++) { tempArr[j] = (byte)val; val >>>=8; } // point at the end index in the byte[] index[doc] = (endPos<<8) | 1; bytes[doc] = tempArr; tempArr = new byte[12]; } } } } setActualDocFreq(termNum, actualDF); } termNum++; if (te.next() == null) { break; } } numTermsInField = termNum; long midPoint = System.currentTimeMillis(); if (termInstances == 0) { // we didn't invert anything // lower memory consumption. tnums = null; } else { this.index = index; // // transform intermediate form into the final form, building a single byte[] // at a time, and releasing the intermediate byte[]s as we go to avoid // increasing the memory footprint. // for (int pass = 0; pass<256; pass++) { byte[] target = tnums[pass]; int pos=0; // end in target; if (target != null) { pos = target.length; } else { target = new byte[4096]; } // loop over documents, 0x00ppxxxx, 0x01ppxxxx, 0x02ppxxxx // where pp is the pass (which array we are building), and xx is all values. // each pass shares the same byte[] for termNumber lists. for (int docbase = pass<<16; docbase<maxDoc; docbase+=(1<<24)) { int lim = Math.min(docbase + (1<<16), maxDoc); for (int doc=docbase; doc<lim; doc++) { //System.out.println(" pass=" + pass + " process docID=" + doc); int val = index[doc]; if ((val&0xff) == 1) { int len = val >>> 8; //System.out.println(" ptr pos=" + pos); index[doc] = (pos<<8)|1; // change index to point to start of array if ((pos & 0xff000000) != 0) { // we only have 24 bits for the array index throw new IllegalStateException("Too many values for UnInvertedField faceting on field "+field); } byte[] arr = bytes[doc]; /* for(byte b : arr) { //System.out.println(" b=" + Integer.toHexString((int) b)); } */ bytes[doc] = null; // IMPORTANT: allow GC to avoid OOM if (target.length <= pos + len) { int newlen = target.length; /*** we don't have to worry about the array getting too large * since the "pos" param will overflow first (only 24 bits available) if ((newlen<<1) <= 0) { // overflow... newlen = Integer.MAX_VALUE; if (newlen <= pos + len) { throw new SolrException(400,"Too many terms to uninvert field!"); } } else { while (newlen <= pos + len) newlen<<=1; // doubling strategy } ****/ while (newlen <= pos + len) newlen<<=1; // doubling strategy byte[] newtarget = new byte[newlen]; System.arraycopy(target, 0, newtarget, 0, pos); target = newtarget; } System.arraycopy(arr, 0, target, pos, len); pos += len + 1; // skip single byte at end and leave it 0 for terminator } } } // shrink array if (pos < target.length) { byte[] newtarget = new byte[pos]; System.arraycopy(target, 0, newtarget, 0, pos); target = newtarget; } tnums[pass] = target; if ((pass << 16) > maxDoc) break; } if (indexedTerms != null) { indexedTermsArray = indexedTerms.toArray(new BytesRef[indexedTerms.size()]); } } long endTime = System.currentTimeMillis(); total_time = (int)(endTime-startTime); phase1_time = (int)(midPoint-startTime); }
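The uninvert pair above (and several pairs below) swaps a `BytesRef` copy constructor for the explicitly named `BytesRef.deepCopyOf`. The reason a full copy is wanted at all is that `TermsEnum` and term attributes reuse their `BytesRef` buffers, so any value retained past the current enumeration position must own its bytes. A minimal sketch of that aliasing hazard, assuming a Lucene 4.x-era `BytesRef` on the classpath; the class and variable names are illustrative, not taken from the patch:

```java
import org.apache.lucene.util.BytesRef;

public class DeepCopyDemo {
  public static void main(String[] args) {
    // Stand-in for an enum-owned buffer that is overwritten on every next()/seek.
    BytesRef shared = new BytesRef("aaa");

    // Wrapping aliases the same byte[]; deepCopyOf allocates a private copy.
    BytesRef aliased = new BytesRef(shared.bytes, shared.offset, shared.length);
    BytesRef copied  = BytesRef.deepCopyOf(shared);

    // The "enum" advances and reuses its buffer in place.
    shared.bytes[shared.offset] = (byte) 'z';

    System.out.println(aliased.utf8ToString()); // "zaa" -- silently changed
    System.out.println(copied.utf8ToString());  // "aaa" -- safe to retain
  }
}
```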
public SeekStatus seekCeil(BytesRef term, boolean useCache) throws IOException { queue.clear(); numTop = 0; lastSeekExact = false; boolean seekOpt = false; if (lastSeek != null && termComp.compare(lastSeek, term) <= 0) { seekOpt = true; } lastSeekScratch.copy(term); lastSeek = lastSeekScratch; for(int i=0;i<numSubs;i++) { final SeekStatus status; // LUCENE-2130: if we had just seek'd already, prior // to this seek, and the new seek term is after the // previous one, don't try to re-seek this sub if its // current term is already beyond this new seek term. // Doing so is a waste because this sub will simply // seek to the same spot. if (seekOpt) { final BytesRef curTerm = currentSubs[i].current; if (curTerm != null) { final int cmp = termComp.compare(term, curTerm); if (cmp == 0) { status = SeekStatus.FOUND; } else if (cmp < 0) { status = SeekStatus.NOT_FOUND; } else { status = currentSubs[i].terms.seekCeil(term, useCache); } } else { status = SeekStatus.END; } } else { status = currentSubs[i].terms.seekCeil(term, useCache); } if (status == SeekStatus.FOUND) { top[numTop++] = currentSubs[i]; current = currentSubs[i].current = currentSubs[i].terms.term(); } else { if (status == SeekStatus.NOT_FOUND) { currentSubs[i].current = currentSubs[i].terms.term(); assert currentSubs[i].current != null; queue.add(currentSubs[i]); } else { // enum exhausted currentSubs[i].current = null; } } } if (numTop > 0) { // at least one sub had exact match to the requested term return SeekStatus.FOUND; } else if (queue.size() > 0) { // no sub had exact match, but at least one sub found // a term after the requested term -- advance to that // next term: pullTop(); return SeekStatus.NOT_FOUND; } else { return SeekStatus.END; } }
public SeekStatus seekCeil(BytesRef term, boolean useCache) throws IOException { queue.clear(); numTop = 0; lastSeekExact = false; boolean seekOpt = false; if (lastSeek != null && termComp.compare(lastSeek, term) <= 0) { seekOpt = true; } lastSeekScratch.copyBytes(term); lastSeek = lastSeekScratch; for(int i=0;i<numSubs;i++) { final SeekStatus status; // LUCENE-2130: if we had just seek'd already, prior // to this seek, and the new seek term is after the // previous one, don't try to re-seek this sub if its // current term is already beyond this new seek term. // Doing so is a waste because this sub will simply // seek to the same spot. if (seekOpt) { final BytesRef curTerm = currentSubs[i].current; if (curTerm != null) { final int cmp = termComp.compare(term, curTerm); if (cmp == 0) { status = SeekStatus.FOUND; } else if (cmp < 0) { status = SeekStatus.NOT_FOUND; } else { status = currentSubs[i].terms.seekCeil(term, useCache); } } else { status = SeekStatus.END; } } else { status = currentSubs[i].terms.seekCeil(term, useCache); } if (status == SeekStatus.FOUND) { top[numTop++] = currentSubs[i]; current = currentSubs[i].current = currentSubs[i].terms.term(); } else { if (status == SeekStatus.NOT_FOUND) { currentSubs[i].current = currentSubs[i].terms.term(); assert currentSubs[i].current != null; queue.add(currentSubs[i]); } else { // enum exhausted currentSubs[i].current = null; } } } if (numTop > 0) { // at least one sub had exact match to the requested term return SeekStatus.FOUND; } else if (queue.size() > 0) { // no sub had exact match, but at least one sub found // a term after the requested term -- advance to that // next term: pullTop(); return SeekStatus.NOT_FOUND; } else { return SeekStatus.END; } }
protected BytesRef nextSeekTerm(final BytesRef term) throws IOException { //System.out.println("ATE.nextSeekTerm term=" + term); if (term == null) { assert seekBytesRef.length == 0; // return the empty term, as its valid if (runAutomaton.isAccept(runAutomaton.getInitialState())) { return seekBytesRef; } } else { seekBytesRef.copy(term); } // seek to the next possible string; if (nextString()) { return seekBytesRef; // reposition } else { return null; // no more possible strings can match } }
protected BytesRef nextSeekTerm(final BytesRef term) throws IOException { //System.out.println("ATE.nextSeekTerm term=" + term); if (term == null) { assert seekBytesRef.length == 0; // return the empty term, as its valid if (runAutomaton.isAccept(runAutomaton.getInitialState())) { return seekBytesRef; } } else { seekBytesRef.copyBytes(term); } // seek to the next possible string; if (nextString()) { return seekBytesRef; // reposition } else { return null; // no more possible strings can match } }
private boolean checkDeleteTerm(Term term) { if (term != null) { assert lastDeleteTerm == null || term.compareTo(lastDeleteTerm) > 0: "lastTerm=" + lastDeleteTerm + " vs term=" + term; } // TODO: we re-use term now in our merged iterable, but we shouldn't clone, instead copy for this assert lastDeleteTerm = term == null ? null : new Term(term.field(), new BytesRef(term.bytes)); return true; }
private boolean checkDeleteTerm(Term term) { if (term != null) { assert lastDeleteTerm == null || term.compareTo(lastDeleteTerm) > 0: "lastTerm=" + lastDeleteTerm + " vs term=" + term; } // TODO: we re-use term now in our merged iterable, but we shouldn't clone, instead copy for this assert lastDeleteTerm = term == null ? null : new Term(term.field(), BytesRef.deepCopyOf(term.bytes)); return true; }
public void startTerm(BytesRef term, int freq) throws IOException { final int prefix = StringHelper.bytesDifference(lastTerm.bytes, lastTerm.offset, lastTerm.length, term.bytes, term.offset, term.length); final int suffix = term.length - prefix; tvf.writeVInt(prefix); tvf.writeVInt(suffix); tvf.writeBytes(term.bytes, term.offset + prefix, suffix); tvf.writeVInt(freq); lastTerm.copy(term); lastPosition = lastOffset = 0; if (offsets && positions) { // we might need to buffer if its a non-bulk merge offsetStartBuffer = ArrayUtil.grow(offsetStartBuffer, freq); offsetEndBuffer = ArrayUtil.grow(offsetEndBuffer, freq); offsetIndex = 0; offsetFreq = freq; } }
public void startTerm(BytesRef term, int freq) throws IOException { final int prefix = StringHelper.bytesDifference(lastTerm.bytes, lastTerm.offset, lastTerm.length, term.bytes, term.offset, term.length); final int suffix = term.length - prefix; tvf.writeVInt(prefix); tvf.writeVInt(suffix); tvf.writeBytes(term.bytes, term.offset + prefix, suffix); tvf.writeVInt(freq); lastTerm.copyBytes(term); lastPosition = lastOffset = 0; if (offsets && positions) { // we might need to buffer if its a non-bulk merge offsetStartBuffer = ArrayUtil.grow(offsetStartBuffer, freq); offsetEndBuffer = ArrayUtil.grow(offsetEndBuffer, freq); offsetIndex = 0; offsetFreq = freq; } }
// this new arc is private to this new input; set its // arc output to the leftover output: frontier[prefixLenPlus1-1].setLastOutput(input.ints[input.offset + prefixLenPlus1-1], output); } // save last input lastInput.copy(input); //System.out.println(" count[0]=" + frontier[0].inputCount); }
// this new arc is private to this new input; set its // arc output to the leftover output: frontier[prefixLenPlus1-1].setLastOutput(input.ints[input.offset + prefixLenPlus1-1], output); } // save last input lastInput.copyInts(input); //System.out.println(" count[0]=" + frontier[0].inputCount); }
private boolean setPrevious(CharsRef current) { // don't need to copy, once we fix https://issues.apache.org/jira/browse/LUCENE-3277 // still, called only from assert previous = new CharsRef(current); return true; }
private boolean setPrevious(CharsRef current) { // don't need to copy, once we fix https://issues.apache.org/jira/browse/LUCENE-3277 // still, called only from assert previous = CharsRef.deepCopyOf(current); return true; }
public CharsRef indexedToReadable(BytesRef input, CharsRef charsRef) { // TODO: this could be more efficient, but the sortable types should be deprecated instead final char[] indexedToReadable = indexedToReadable(input.utf8ToChars(charsRef).toString()).toCharArray(); charsRef.copy(indexedToReadable, 0, indexedToReadable.length); return charsRef; }
public CharsRef indexedToReadable(BytesRef input, CharsRef charsRef) { // TODO: this could be more efficient, but the sortable types should be deprecated instead final char[] indexedToReadable = indexedToReadable(input.utf8ToChars(charsRef).toString()).toCharArray(); charsRef.copyChars(indexedToReadable, 0, indexedToReadable.length); return charsRef; }
public CharsRef indexedToReadable(BytesRef input, CharsRef charsRef) { // TODO: this could be more efficient, but the sortable types should be deprecated instead input.utf8ToChars(charsRef); final char[] indexedToReadable = indexedToReadable(charsRef.toString()).toCharArray(); charsRef.copy(indexedToReadable, 0, indexedToReadable.length); return charsRef; }
public CharsRef indexedToReadable(BytesRef input, CharsRef charsRef) { // TODO: this could be more efficient, but the sortable types should be deprecated instead input.utf8ToChars(charsRef); final char[] indexedToReadable = indexedToReadable(charsRef.toString()).toCharArray(); charsRef.copyChars(indexedToReadable, 0, indexedToReadable.length); return charsRef; }
private BytesRef analyzeRangePart(String field, String part) { TokenStream source; try { source = analyzer.tokenStream(field, new StringReader(part)); source.reset(); } catch (IOException e) { throw new RuntimeException("Unable to initialize TokenStream to analyze range part: " + part, e); } TermToBytesRefAttribute termAtt = source.getAttribute(TermToBytesRefAttribute.class); BytesRef bytes = termAtt.getBytesRef(); // we control the analyzer here: most errors are impossible try { if (!source.incrementToken()) throw new IllegalArgumentException("analyzer returned no terms for range part: " + part); termAtt.fillBytesRef(); assert !source.incrementToken(); } catch (IOException e) { throw new RuntimeException("error analyzing range part: " + part, e); } try { source.end(); source.close(); } catch (IOException e) { throw new RuntimeException("Unable to end & close TokenStream after analyzing range part: " + part, e); } return new BytesRef(bytes); }
private BytesRef analyzeRangePart(String field, String part) { TokenStream source; try { source = analyzer.tokenStream(field, new StringReader(part)); source.reset(); } catch (IOException e) { throw new RuntimeException("Unable to initialize TokenStream to analyze range part: " + part, e); } TermToBytesRefAttribute termAtt = source.getAttribute(TermToBytesRefAttribute.class); BytesRef bytes = termAtt.getBytesRef(); // we control the analyzer here: most errors are impossible try { if (!source.incrementToken()) throw new IllegalArgumentException("analyzer returned no terms for range part: " + part); termAtt.fillBytesRef(); assert !source.incrementToken(); } catch (IOException e) { throw new RuntimeException("error analyzing range part: " + part, e); } try { source.end(); source.close(); } catch (IOException e) { throw new RuntimeException("Unable to end & close TokenStream after analyzing range part: " + part, e); } return BytesRef.deepCopyOf(bytes); }
public Query parse() throws ParseException { String fname = localParams.get(QueryParsing.F); FieldType ft = req.getSchema().getFieldTypeNoEx(fname); String val = localParams.get(QueryParsing.V); BytesRef term = new BytesRef(); if (ft != null) { ft.readableToIndexed(val, term); } else { term.copy(val); } return new TermQuery(new Term(fname, term)); } }; }
public Query parse() throws ParseException { String fname = localParams.get(QueryParsing.F); FieldType ft = req.getSchema().getFieldTypeNoEx(fname); String val = localParams.get(QueryParsing.V); BytesRef term = new BytesRef(); if (ft != null) { ft.readableToIndexed(val, term); } else { term.copyChars(val); } return new TermQuery(new Term(fname, term)); } }; }
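From the multi-term `seekCeil` down to this query parser, the substitution looks like an API rename of that era rather than a behavioural change: the overloaded `copy(...)` calls become the explicitly named `copyBytes`, `copyChars` and, in the FST builder fragment, `copyInts`, while `new CharsRef(current)` becomes `CharsRef.deepCopyOf(current)`. The renamed methods state what is being copied into a reusable scratch ref. A short sketch of the two variants used most often here, assuming the same 4.x-era Lucene util API as the surrounding code; the wrapper class is illustrative:

```java
import org.apache.lucene.util.BytesRef;

public class ScratchCopyDemo {
  public static void main(String[] args) {
    BytesRef scratch = new BytesRef();          // reusable scratch buffer

    // Copy raw bytes from another ref; the scratch array grows as needed.
    scratch.copyBytes(new BytesRef("foo"));
    System.out.println(scratch.utf8ToString()); // foo

    // Copy characters, UTF-8 encoding them into the same scratch ref.
    scratch.copyChars("longer value");
    System.out.println(scratch.utf8ToString()); // longer value
  }
}
```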
public DocSet getDocSet(DocsEnumState deState) throws IOException { int largestPossible = deState.termsEnum.docFreq(); boolean useCache = filterCache != null && largestPossible >= deState.minSetSizeCached; TermQuery key = null; if (useCache) { key = new TermQuery(new Term(deState.fieldName, new BytesRef(deState.termsEnum.term()))); DocSet result = filterCache.get(key); if (result != null) return result; } int smallSetSize = maxDoc()>>6; int scratchSize = Math.min(smallSetSize, largestPossible); if (deState.scratch == null || deState.scratch.length < scratchSize) deState.scratch = new int[scratchSize]; final int[] docs = deState.scratch; int upto = 0; int bitsSet = 0; OpenBitSet obs = null; DocsEnum docsEnum = deState.termsEnum.docs(deState.liveDocs, deState.docsEnum); if (deState.docsEnum == null) { deState.docsEnum = docsEnum; } if (docsEnum instanceof MultiDocsEnum) { MultiDocsEnum.EnumWithSlice[] subs = ((MultiDocsEnum)docsEnum).getSubs(); int numSubs = ((MultiDocsEnum)docsEnum).getNumSubs(); for (int subindex = 0; subindex<numSubs; subindex++) { MultiDocsEnum.EnumWithSlice sub = subs[subindex]; if (sub.docsEnum == null) continue; DocsEnum.BulkReadResult bulk = sub.docsEnum.getBulkResult(); int base = sub.slice.start; for (;;) { int nDocs = sub.docsEnum.read(); if (nDocs == 0) break; int[] docArr = bulk.docs.ints; int end = bulk.docs.offset + nDocs; if (upto + nDocs > docs.length) { if (obs == null) obs = new OpenBitSet(maxDoc()); for (int i=bulk.docs.offset; i<end; i++) { obs.fastSet(docArr[i]+base); } bitsSet += nDocs; } else { for (int i=bulk.docs.offset; i<end; i++) { docs[upto++] = docArr[i]+base; } } } } } else { DocsEnum.BulkReadResult bulk = docsEnum.getBulkResult(); for (;;) { int nDocs = docsEnum.read(); if (nDocs == 0) break; int[] docArr = bulk.docs.ints; int end = bulk.docs.offset + nDocs; if (upto + nDocs > docs.length) { if (obs == null) obs = new OpenBitSet(maxDoc()); for (int i=bulk.docs.offset; i<end; i++) { obs.fastSet(docArr[i]); } bitsSet += nDocs; } else { for (int i=bulk.docs.offset; i<end; i++) { docs[upto++] = docArr[i]; } } } } DocSet result; if (obs != null) { for (int i=0; i<upto; i++) { obs.fastSet(docs[i]); } bitsSet += upto; result = new BitDocSet(obs, bitsSet); } else { result = upto==0 ? DocSet.EMPTY : new SortedIntDocSet(Arrays.copyOf(docs, upto)); } if (useCache) { filterCache.put(key, result); } return result; }
public DocSet getDocSet(DocsEnumState deState) throws IOException { int largestPossible = deState.termsEnum.docFreq(); boolean useCache = filterCache != null && largestPossible >= deState.minSetSizeCached; TermQuery key = null; if (useCache) { key = new TermQuery(new Term(deState.fieldName, BytesRef.deepCopyOf(deState.termsEnum.term()))); DocSet result = filterCache.get(key); if (result != null) return result; } int smallSetSize = maxDoc()>>6; int scratchSize = Math.min(smallSetSize, largestPossible); if (deState.scratch == null || deState.scratch.length < scratchSize) deState.scratch = new int[scratchSize]; final int[] docs = deState.scratch; int upto = 0; int bitsSet = 0; OpenBitSet obs = null; DocsEnum docsEnum = deState.termsEnum.docs(deState.liveDocs, deState.docsEnum); if (deState.docsEnum == null) { deState.docsEnum = docsEnum; } if (docsEnum instanceof MultiDocsEnum) { MultiDocsEnum.EnumWithSlice[] subs = ((MultiDocsEnum)docsEnum).getSubs(); int numSubs = ((MultiDocsEnum)docsEnum).getNumSubs(); for (int subindex = 0; subindex<numSubs; subindex++) { MultiDocsEnum.EnumWithSlice sub = subs[subindex]; if (sub.docsEnum == null) continue; DocsEnum.BulkReadResult bulk = sub.docsEnum.getBulkResult(); int base = sub.slice.start; for (;;) { int nDocs = sub.docsEnum.read(); if (nDocs == 0) break; int[] docArr = bulk.docs.ints; int end = bulk.docs.offset + nDocs; if (upto + nDocs > docs.length) { if (obs == null) obs = new OpenBitSet(maxDoc()); for (int i=bulk.docs.offset; i<end; i++) { obs.fastSet(docArr[i]+base); } bitsSet += nDocs; } else { for (int i=bulk.docs.offset; i<end; i++) { docs[upto++] = docArr[i]+base; } } } } } else { DocsEnum.BulkReadResult bulk = docsEnum.getBulkResult(); for (;;) { int nDocs = docsEnum.read(); if (nDocs == 0) break; int[] docArr = bulk.docs.ints; int end = bulk.docs.offset + nDocs; if (upto + nDocs > docs.length) { if (obs == null) obs = new OpenBitSet(maxDoc()); for (int i=bulk.docs.offset; i<end; i++) { obs.fastSet(docArr[i]); } bitsSet += nDocs; } else { for (int i=bulk.docs.offset; i<end; i++) { docs[upto++] = docArr[i]; } } } } DocSet result; if (obs != null) { for (int i=0; i<upto; i++) { obs.fastSet(docs[i]); } bitsSet += upto; result = new BitDocSet(obs, bitsSet); } else { result = upto==0 ? DocSet.EMPTY : new SortedIntDocSet(Arrays.copyOf(docs, upto)); } if (useCache) { filterCache.put(key, result); } return result; }
public NamedList<Integer> getFacetTermEnumCounts(SolrIndexSearcher searcher, DocSet docs, String field, int offset, int limit, int mincount, boolean missing, String sort, String prefix) throws IOException { /* :TODO: potential optimization... * cache the Terms with the highest docFreq and try them first * don't enum if we get our max from them */ // Minimum term docFreq in order to use the filterCache for that term. int minDfFilterCache = params.getFieldInt(field, FacetParams.FACET_ENUM_CACHE_MINDF, 0); // make sure we have a set that is fast for random access, if we will use it for that DocSet fastForRandomSet = docs; if (minDfFilterCache>0 && docs instanceof SortedIntDocSet) { SortedIntDocSet sset = (SortedIntDocSet)docs; fastForRandomSet = new HashDocSet(sset.getDocs(), 0, sset.size()); } IndexSchema schema = searcher.getSchema(); IndexReader r = searcher.getIndexReader(); FieldType ft = schema.getFieldType(field); boolean sortByCount = sort.equals("count") || sort.equals("true"); final int maxsize = limit>=0 ? offset+limit : Integer.MAX_VALUE-1; final BoundedTreeSet<CountPair<BytesRef,Integer>> queue = sortByCount ? new BoundedTreeSet<CountPair<BytesRef,Integer>>(maxsize) : null; final NamedList<Integer> res = new NamedList<Integer>(); int min=mincount-1; // the smallest value in the top 'N' values int off=offset; int lim=limit>=0 ? limit : Integer.MAX_VALUE; BytesRef startTermBytes = null; if (prefix != null) { String indexedPrefix = ft.toInternal(prefix); startTermBytes = new BytesRef(indexedPrefix); } Fields fields = MultiFields.getFields(r); Terms terms = fields==null ? null : fields.terms(field); TermsEnum termsEnum = null; SolrIndexSearcher.DocsEnumState deState = null; BytesRef term = null; if (terms != null) { termsEnum = terms.iterator(null); // TODO: OPT: if seek(ord) is supported for this termsEnum, then we could use it for // facet.offset when sorting by index order. if (startTermBytes != null) { if (termsEnum.seekCeil(startTermBytes, true) == TermsEnum.SeekStatus.END) { termsEnum = null; } else { term = termsEnum.term(); } } else { // position termsEnum on first term term = termsEnum.next(); } } DocsEnum docsEnum = null; CharsRef charsRef = new CharsRef(10); if (docs.size() >= mincount) { while (term != null) { if (startTermBytes != null && !term.startsWith(startTermBytes)) break; int df = termsEnum.docFreq(); // If we are sorting, we can use df>min (rather than >=) since we // are going in index order. For certain term distributions this can // make a large difference (for example, many terms with df=1). if (df>0 && df>min) { int c; if (df >= minDfFilterCache) { // use the filter cache if (deState==null) { deState = new SolrIndexSearcher.DocsEnumState(); deState.fieldName = field; deState.liveDocs = MultiFields.getLiveDocs(r); deState.termsEnum = termsEnum; deState.docsEnum = docsEnum; } c = searcher.numDocs(docs, deState); docsEnum = deState.docsEnum; } else { // iterate over TermDocs to calculate the intersection // TODO: specialize when base docset is a bitset or hash set (skipDocs)? or does it matter for this? // TODO: do this per-segment for better efficiency (MultiDocsEnum just uses base class impl) // TODO: would passing deleted docs lead to better efficiency over checking the fastForRandomSet? 
docsEnum = termsEnum.docs(null, docsEnum); c=0; if (docsEnum instanceof MultiDocsEnum) { MultiDocsEnum.EnumWithSlice[] subs = ((MultiDocsEnum)docsEnum).getSubs(); int numSubs = ((MultiDocsEnum)docsEnum).getNumSubs(); for (int subindex = 0; subindex<numSubs; subindex++) { MultiDocsEnum.EnumWithSlice sub = subs[subindex]; if (sub.docsEnum == null) continue; DocsEnum.BulkReadResult bulk = sub.docsEnum.getBulkResult(); int base = sub.slice.start; for (;;) { int nDocs = sub.docsEnum.read(); if (nDocs == 0) break; int[] docArr = bulk.docs.ints; // this might be movable outside the loop, but perhaps not worth the risk. int end = bulk.docs.offset + nDocs; for (int i=bulk.docs.offset; i<end; i++) { if (fastForRandomSet.exists(docArr[i]+base)) c++; } } } } else { // this should be the same bulk result object if sharing of the docsEnum succeeded DocsEnum.BulkReadResult bulk = docsEnum.getBulkResult(); for (;;) { int nDocs = docsEnum.read(); if (nDocs == 0) break; int[] docArr = bulk.docs.ints; // this might be movable outside the loop, but perhaps not worth the risk. int end = bulk.docs.offset + nDocs; for (int i=bulk.docs.offset; i<end; i++) { if (fastForRandomSet.exists(docArr[i])) c++; } } } } if (sortByCount) { if (c>min) { BytesRef termCopy = new BytesRef(term); queue.add(new CountPair<BytesRef,Integer>(termCopy, c)); if (queue.size()>=maxsize) min=queue.last().val; } } else { if (c >= mincount && --off<0) { if (--lim<0) break; ft.indexedToReadable(term, charsRef); res.add(charsRef.toString(), c); } } } term = termsEnum.next(); } } if (sortByCount) { for (CountPair<BytesRef,Integer> p : queue) { if (--off>=0) continue; if (--lim<0) break; ft.indexedToReadable(p.key, charsRef); res.add(charsRef.toString(), p.val); } } if (missing) { res.add(null, getFieldMissingCount(searcher,docs,field)); } return res; }
public NamedList<Integer> getFacetTermEnumCounts(SolrIndexSearcher searcher, DocSet docs, String field, int offset, int limit, int mincount, boolean missing, String sort, String prefix) throws IOException { /* :TODO: potential optimization... * cache the Terms with the highest docFreq and try them first * don't enum if we get our max from them */ // Minimum term docFreq in order to use the filterCache for that term. int minDfFilterCache = params.getFieldInt(field, FacetParams.FACET_ENUM_CACHE_MINDF, 0); // make sure we have a set that is fast for random access, if we will use it for that DocSet fastForRandomSet = docs; if (minDfFilterCache>0 && docs instanceof SortedIntDocSet) { SortedIntDocSet sset = (SortedIntDocSet)docs; fastForRandomSet = new HashDocSet(sset.getDocs(), 0, sset.size()); } IndexSchema schema = searcher.getSchema(); IndexReader r = searcher.getIndexReader(); FieldType ft = schema.getFieldType(field); boolean sortByCount = sort.equals("count") || sort.equals("true"); final int maxsize = limit>=0 ? offset+limit : Integer.MAX_VALUE-1; final BoundedTreeSet<CountPair<BytesRef,Integer>> queue = sortByCount ? new BoundedTreeSet<CountPair<BytesRef,Integer>>(maxsize) : null; final NamedList<Integer> res = new NamedList<Integer>(); int min=mincount-1; // the smallest value in the top 'N' values int off=offset; int lim=limit>=0 ? limit : Integer.MAX_VALUE; BytesRef startTermBytes = null; if (prefix != null) { String indexedPrefix = ft.toInternal(prefix); startTermBytes = new BytesRef(indexedPrefix); } Fields fields = MultiFields.getFields(r); Terms terms = fields==null ? null : fields.terms(field); TermsEnum termsEnum = null; SolrIndexSearcher.DocsEnumState deState = null; BytesRef term = null; if (terms != null) { termsEnum = terms.iterator(null); // TODO: OPT: if seek(ord) is supported for this termsEnum, then we could use it for // facet.offset when sorting by index order. if (startTermBytes != null) { if (termsEnum.seekCeil(startTermBytes, true) == TermsEnum.SeekStatus.END) { termsEnum = null; } else { term = termsEnum.term(); } } else { // position termsEnum on first term term = termsEnum.next(); } } DocsEnum docsEnum = null; CharsRef charsRef = new CharsRef(10); if (docs.size() >= mincount) { while (term != null) { if (startTermBytes != null && !term.startsWith(startTermBytes)) break; int df = termsEnum.docFreq(); // If we are sorting, we can use df>min (rather than >=) since we // are going in index order. For certain term distributions this can // make a large difference (for example, many terms with df=1). if (df>0 && df>min) { int c; if (df >= minDfFilterCache) { // use the filter cache if (deState==null) { deState = new SolrIndexSearcher.DocsEnumState(); deState.fieldName = field; deState.liveDocs = MultiFields.getLiveDocs(r); deState.termsEnum = termsEnum; deState.docsEnum = docsEnum; } c = searcher.numDocs(docs, deState); docsEnum = deState.docsEnum; } else { // iterate over TermDocs to calculate the intersection // TODO: specialize when base docset is a bitset or hash set (skipDocs)? or does it matter for this? // TODO: do this per-segment for better efficiency (MultiDocsEnum just uses base class impl) // TODO: would passing deleted docs lead to better efficiency over checking the fastForRandomSet? 
docsEnum = termsEnum.docs(null, docsEnum); c=0; if (docsEnum instanceof MultiDocsEnum) { MultiDocsEnum.EnumWithSlice[] subs = ((MultiDocsEnum)docsEnum).getSubs(); int numSubs = ((MultiDocsEnum)docsEnum).getNumSubs(); for (int subindex = 0; subindex<numSubs; subindex++) { MultiDocsEnum.EnumWithSlice sub = subs[subindex]; if (sub.docsEnum == null) continue; DocsEnum.BulkReadResult bulk = sub.docsEnum.getBulkResult(); int base = sub.slice.start; for (;;) { int nDocs = sub.docsEnum.read(); if (nDocs == 0) break; int[] docArr = bulk.docs.ints; // this might be movable outside the loop, but perhaps not worth the risk. int end = bulk.docs.offset + nDocs; for (int i=bulk.docs.offset; i<end; i++) { if (fastForRandomSet.exists(docArr[i]+base)) c++; } } } } else { // this should be the same bulk result object if sharing of the docsEnum succeeded DocsEnum.BulkReadResult bulk = docsEnum.getBulkResult(); for (;;) { int nDocs = docsEnum.read(); if (nDocs == 0) break; int[] docArr = bulk.docs.ints; // this might be movable outside the loop, but perhaps not worth the risk. int end = bulk.docs.offset + nDocs; for (int i=bulk.docs.offset; i<end; i++) { if (fastForRandomSet.exists(docArr[i])) c++; } } } } if (sortByCount) { if (c>min) { BytesRef termCopy = BytesRef.deepCopyOf(term); queue.add(new CountPair<BytesRef,Integer>(termCopy, c)); if (queue.size()>=maxsize) min=queue.last().val; } } else { if (c >= mincount && --off<0) { if (--lim<0) break; ft.indexedToReadable(term, charsRef); res.add(charsRef.toString(), c); } } } term = termsEnum.next(); } } if (sortByCount) { for (CountPair<BytesRef,Integer> p : queue) { if (--off>=0) continue; if (--lim<0) break; ft.indexedToReadable(p.key, charsRef); res.add(charsRef.toString(), p.val); } } if (missing) { res.add(null, getFieldMissingCount(searcher,docs,field)); } return res; }
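In the facet pair just above, the later version calls `BytesRef.deepCopyOf(term)` before putting the term into the `BoundedTreeSet` queue. Storing a key that is later refilled in place breaks any sorted collection, because the element no longer sits where the comparator placed it. A plain-JDK sketch of that failure mode, using a `byte[]` as a stand-in for the reused term buffer and `TreeSet` rather than Solr's `BoundedTreeSet`:

```java
import java.nio.charset.StandardCharsets;
import java.util.Comparator;
import java.util.TreeSet;

public class MutableKeyDemo {
  public static void main(String[] args) {
    // Sorted queue keyed on the contents of a byte[], like the (term, count) queue above.
    Comparator<byte[]> byContents =
        Comparator.comparing(b -> new String(b, StandardCharsets.UTF_8));
    TreeSet<byte[]> queue = new TreeSet<>(byContents);

    byte[] shared = "aaa".getBytes(StandardCharsets.UTF_8);
    queue.add(shared);                                   // aliased, not copied
    queue.add("mmm".getBytes(StandardCharsets.UTF_8));   // independent copy

    // The "terms enum" moves on and overwrites its shared buffer.
    shared[0] = (byte) 'z';

    // The first key now reads "zaa" but still sits where "aaa" was placed:
    queue.forEach(b -> System.out.println(new String(b, StandardCharsets.UTF_8))); // zaa, mmm
    System.out.println(queue.contains("mmm".getBytes(StandardCharsets.UTF_8)));    // false
  }
}
```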
protected void visitTerm(TermsEnum te, int termNum) throws IOException { if (termNum >= maxTermCounts.length) { // resize by doubling - for very large number of unique terms, expanding // by 4K and resultant GC will dominate uninvert times. Resize at end if material int[] newMaxTermCounts = new int[maxTermCounts.length*2]; System.arraycopy(maxTermCounts, 0, newMaxTermCounts, 0, termNum); maxTermCounts = newMaxTermCounts; } final BytesRef term = te.term(); if (te.docFreq() > maxTermDocFreq) { TopTerm topTerm = new TopTerm(); topTerm.term = new BytesRef(term); topTerm.termNum = termNum; bigTerms.put(topTerm.termNum, topTerm); if (deState == null) { deState = new SolrIndexSearcher.DocsEnumState(); deState.fieldName = field; // deState.termsEnum = te.tenum; deState.termsEnum = te; // TODO: check for MultiTermsEnum in SolrIndexSearcher could now fail? deState.docsEnum = docsEnum; deState.minSetSizeCached = maxTermDocFreq; } docsEnum = deState.docsEnum; DocSet set = searcher.getDocSet(deState); maxTermCounts[termNum] = set.size(); } }
protected void visitTerm(TermsEnum te, int termNum) throws IOException { if (termNum >= maxTermCounts.length) { // resize by doubling - for very large number of unique terms, expanding // by 4K and resultant GC will dominate uninvert times. Resize at end if material int[] newMaxTermCounts = new int[maxTermCounts.length*2]; System.arraycopy(maxTermCounts, 0, newMaxTermCounts, 0, termNum); maxTermCounts = newMaxTermCounts; } final BytesRef term = te.term(); if (te.docFreq() > maxTermDocFreq) { TopTerm topTerm = new TopTerm(); topTerm.term = BytesRef.deepCopyOf(term); topTerm.termNum = termNum; bigTerms.put(topTerm.termNum, topTerm); if (deState == null) { deState = new SolrIndexSearcher.DocsEnumState(); deState.fieldName = field; // deState.termsEnum = te.tenum; deState.termsEnum = te; // TODO: check for MultiTermsEnum in SolrIndexSearcher could now fail? deState.docsEnum = docsEnum; deState.minSetSizeCached = maxTermDocFreq; } docsEnum = deState.docsEnum; DocSet set = searcher.getDocSet(deState); maxTermCounts[termNum] = set.size(); } }
public void process(ResponseBuilder rb) throws IOException { SolrParams params = rb.req.getParams(); if (!params.getBool(TermsParams.TERMS, false)) return; String[] fields = params.getParams(TermsParams.TERMS_FIELD); NamedList<Object> termsResult = new SimpleOrderedMap<Object>(); rb.rsp.add("terms", termsResult); if (fields == null || fields.length==0) return; int limit = params.getInt(TermsParams.TERMS_LIMIT, 10); if (limit < 0) { limit = Integer.MAX_VALUE; } String lowerStr = params.get(TermsParams.TERMS_LOWER); String upperStr = params.get(TermsParams.TERMS_UPPER); boolean upperIncl = params.getBool(TermsParams.TERMS_UPPER_INCLUSIVE, false); boolean lowerIncl = params.getBool(TermsParams.TERMS_LOWER_INCLUSIVE, true); boolean sort = !TermsParams.TERMS_SORT_INDEX.equals( params.get(TermsParams.TERMS_SORT, TermsParams.TERMS_SORT_COUNT)); int freqmin = params.getInt(TermsParams.TERMS_MINCOUNT, 1); int freqmax = params.getInt(TermsParams.TERMS_MAXCOUNT, UNLIMITED_MAX_COUNT); if (freqmax<0) { freqmax = Integer.MAX_VALUE; } String prefix = params.get(TermsParams.TERMS_PREFIX_STR); String regexp = params.get(TermsParams.TERMS_REGEXP_STR); Pattern pattern = regexp != null ? Pattern.compile(regexp, resolveRegexpFlags(params)) : null; boolean raw = params.getBool(TermsParams.TERMS_RAW, false); final IndexReader indexReader = rb.req.getSearcher().getTopReaderContext().reader; Fields lfields = MultiFields.getFields(indexReader); for (String field : fields) { NamedList<Integer> fieldTerms = new NamedList<Integer>(); termsResult.add(field, fieldTerms); Terms terms = lfields == null ? null : lfields.terms(field); if (terms == null) { // no terms for this field continue; } FieldType ft = raw ? null : rb.req.getSchema().getFieldTypeNoEx(field); if (ft==null) ft = new StrField(); // prefix must currently be text BytesRef prefixBytes = prefix==null ? null : new BytesRef(prefix); BytesRef upperBytes = null; if (upperStr != null) { upperBytes = new BytesRef(); ft.readableToIndexed(upperStr, upperBytes); } BytesRef lowerBytes; if (lowerStr == null) { // If no lower bound was specified, use the prefix lowerBytes = prefixBytes; } else { lowerBytes = new BytesRef(); if (raw) { // TODO: how to handle binary? perhaps we don't for "raw"... or if the field exists // perhaps we detect if the FieldType is non-character and expect hex if so? lowerBytes = new BytesRef(lowerStr); } else { lowerBytes = new BytesRef(); ft.readableToIndexed(lowerStr, lowerBytes); } } TermsEnum termsEnum = terms.iterator(null); BytesRef term = null; if (lowerBytes != null) { if (termsEnum.seekCeil(lowerBytes, true) == TermsEnum.SeekStatus.END) { termsEnum = null; } else { term = termsEnum.term(); //Only advance the enum if we are excluding the lower bound and the lower Term actually matches if (lowerIncl == false && term.equals(lowerBytes)) { term = termsEnum.next(); } } } else { // position termsEnum on first term term = termsEnum.next(); } int i = 0; BoundedTreeSet<CountPair<BytesRef, Integer>> queue = (sort ? new BoundedTreeSet<CountPair<BytesRef, Integer>>(limit) : null); CharsRef external = new CharsRef(); while (term != null && (i<limit || sort)) { boolean externalized = false; // did we fill in "external" yet for this term? // stop if the prefix doesn't match if (prefixBytes != null && !term.startsWith(prefixBytes)) break; if (pattern != null) { // indexed text or external text? // TODO: support "raw" mode? 
ft.indexedToReadable(term, external); externalized = true; if (!pattern.matcher(external).matches()) { term = termsEnum.next(); continue; } } if (upperBytes != null) { int upperCmp = term.compareTo(upperBytes); // if we are past the upper term, or equal to it (when don't include upper) then stop. if (upperCmp>0 || (upperCmp==0 && !upperIncl)) break; } // This is a good term in the range. Check if mincount/maxcount conditions are satisfied. int docFreq = termsEnum.docFreq(); if (docFreq >= freqmin && docFreq <= freqmax) { // add the term to the list if (sort) { queue.add(new CountPair<BytesRef, Integer>(new BytesRef(term), docFreq)); } else { // TODO: handle raw somehow if (!externalized) { ft.indexedToReadable(term, external); } fieldTerms.add(external.toString(), docFreq); i++; } } term = termsEnum.next(); } if (sort) { for (CountPair<BytesRef, Integer> item : queue) { if (i >= limit) break; ft.indexedToReadable(item.key, external); fieldTerms.add(external.toString(), item.val); i++; } } } }
public void process(ResponseBuilder rb) throws IOException { SolrParams params = rb.req.getParams(); if (!params.getBool(TermsParams.TERMS, false)) return; String[] fields = params.getParams(TermsParams.TERMS_FIELD); NamedList<Object> termsResult = new SimpleOrderedMap<Object>(); rb.rsp.add("terms", termsResult); if (fields == null || fields.length==0) return; int limit = params.getInt(TermsParams.TERMS_LIMIT, 10); if (limit < 0) { limit = Integer.MAX_VALUE; } String lowerStr = params.get(TermsParams.TERMS_LOWER); String upperStr = params.get(TermsParams.TERMS_UPPER); boolean upperIncl = params.getBool(TermsParams.TERMS_UPPER_INCLUSIVE, false); boolean lowerIncl = params.getBool(TermsParams.TERMS_LOWER_INCLUSIVE, true); boolean sort = !TermsParams.TERMS_SORT_INDEX.equals( params.get(TermsParams.TERMS_SORT, TermsParams.TERMS_SORT_COUNT)); int freqmin = params.getInt(TermsParams.TERMS_MINCOUNT, 1); int freqmax = params.getInt(TermsParams.TERMS_MAXCOUNT, UNLIMITED_MAX_COUNT); if (freqmax<0) { freqmax = Integer.MAX_VALUE; } String prefix = params.get(TermsParams.TERMS_PREFIX_STR); String regexp = params.get(TermsParams.TERMS_REGEXP_STR); Pattern pattern = regexp != null ? Pattern.compile(regexp, resolveRegexpFlags(params)) : null; boolean raw = params.getBool(TermsParams.TERMS_RAW, false); final IndexReader indexReader = rb.req.getSearcher().getTopReaderContext().reader; Fields lfields = MultiFields.getFields(indexReader); for (String field : fields) { NamedList<Integer> fieldTerms = new NamedList<Integer>(); termsResult.add(field, fieldTerms); Terms terms = lfields == null ? null : lfields.terms(field); if (terms == null) { // no terms for this field continue; } FieldType ft = raw ? null : rb.req.getSchema().getFieldTypeNoEx(field); if (ft==null) ft = new StrField(); // prefix must currently be text BytesRef prefixBytes = prefix==null ? null : new BytesRef(prefix); BytesRef upperBytes = null; if (upperStr != null) { upperBytes = new BytesRef(); ft.readableToIndexed(upperStr, upperBytes); } BytesRef lowerBytes; if (lowerStr == null) { // If no lower bound was specified, use the prefix lowerBytes = prefixBytes; } else { lowerBytes = new BytesRef(); if (raw) { // TODO: how to handle binary? perhaps we don't for "raw"... or if the field exists // perhaps we detect if the FieldType is non-character and expect hex if so? lowerBytes = new BytesRef(lowerStr); } else { lowerBytes = new BytesRef(); ft.readableToIndexed(lowerStr, lowerBytes); } } TermsEnum termsEnum = terms.iterator(null); BytesRef term = null; if (lowerBytes != null) { if (termsEnum.seekCeil(lowerBytes, true) == TermsEnum.SeekStatus.END) { termsEnum = null; } else { term = termsEnum.term(); //Only advance the enum if we are excluding the lower bound and the lower Term actually matches if (lowerIncl == false && term.equals(lowerBytes)) { term = termsEnum.next(); } } } else { // position termsEnum on first term term = termsEnum.next(); } int i = 0; BoundedTreeSet<CountPair<BytesRef, Integer>> queue = (sort ? new BoundedTreeSet<CountPair<BytesRef, Integer>>(limit) : null); CharsRef external = new CharsRef(); while (term != null && (i<limit || sort)) { boolean externalized = false; // did we fill in "external" yet for this term? // stop if the prefix doesn't match if (prefixBytes != null && !term.startsWith(prefixBytes)) break; if (pattern != null) { // indexed text or external text? // TODO: support "raw" mode? 
ft.indexedToReadable(term, external); externalized = true; if (!pattern.matcher(external).matches()) { term = termsEnum.next(); continue; } } if (upperBytes != null) { int upperCmp = term.compareTo(upperBytes); // if we are past the upper term, or equal to it (when don't include upper) then stop. if (upperCmp>0 || (upperCmp==0 && !upperIncl)) break; } // This is a good term in the range. Check if mincount/maxcount conditions are satisfied. int docFreq = termsEnum.docFreq(); if (docFreq >= freqmin && docFreq <= freqmax) { // add the term to the list if (sort) { queue.add(new CountPair<BytesRef, Integer>(BytesRef.deepCopyOf(term), docFreq)); } else { // TODO: handle raw somehow if (!externalized) { ft.indexedToReadable(term, external); } fieldTerms.add(external.toString(), docFreq); i++; } } term = termsEnum.next(); } if (sort) { for (CountPair<BytesRef, Integer> item : queue) { if (i >= limit) break; ft.indexedToReadable(item.key, external); fieldTerms.add(external.toString(), item.val); i++; } } } }
protected Set<BytesRef> getQueryTokenSet(String query, Analyzer analyzer) { try { final Set<BytesRef> tokens = new HashSet<BytesRef>(); final TokenStream tokenStream = analyzer.tokenStream("", new StringReader(query)); final TermToBytesRefAttribute bytesAtt = tokenStream.getAttribute(TermToBytesRefAttribute.class); final BytesRef bytes = bytesAtt.getBytesRef(); tokenStream.reset(); while (tokenStream.incrementToken()) { bytesAtt.fillBytesRef(); tokens.add(new BytesRef(bytes)); } tokenStream.end(); tokenStream.close(); return tokens; } catch (IOException ioe) { throw new RuntimeException("Error occured while iterating over tokenstream", ioe); } }
protected Set<BytesRef> getQueryTokenSet(String query, Analyzer analyzer) { try { final Set<BytesRef> tokens = new HashSet<BytesRef>(); final TokenStream tokenStream = analyzer.tokenStream("", new StringReader(query)); final TermToBytesRefAttribute bytesAtt = tokenStream.getAttribute(TermToBytesRefAttribute.class); final BytesRef bytes = bytesAtt.getBytesRef(); tokenStream.reset(); while (tokenStream.incrementToken()) { bytesAtt.fillBytesRef(); tokens.add(BytesRef.deepCopyOf(bytes)); } tokenStream.end(); tokenStream.close(); return tokens; } catch (IOException ioe) { throw new RuntimeException("Error occured while iterating over tokenstream", ioe); } }
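The `getQueryTokenSet` pair applies the same rule to a hash-based collection: the later version deep-copies the attribute's `BytesRef` before adding it to the `HashSet`, because the attribute refills the very same instance on every `incrementToken()`. Keeping the live object would leave it filed under a stale hash once its contents change. A JDK-only sketch of that effect, with a mutable `List` standing in for the reused `BytesRef`:

```java
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class MutableHashKeyDemo {
  public static void main(String[] args) {
    Set<List<Integer>> tokens = new HashSet<>();

    List<Integer> shared = new ArrayList<>(List.of(1, 2, 3));
    tokens.add(shared);      // aliased: the set keeps the live, reusable object
    shared.set(0, 9);        // the "attribute" is refilled for the next token

    System.out.println(tokens.contains(List.of(9, 2, 3))); // false: filed under the old hash
    System.out.println(tokens.contains(List.of(1, 2, 3))); // false: stored contents changed
    tokens.add(new ArrayList<>(List.of(9, 2, 3)));          // a value-equal "duplicate" gets in
    System.out.println(tokens.size());                      // 2
  }
}
```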
public Weight createWeight(IndexSearcher searcher) throws IOException { throw new UnsupportedOperationException(); }
public Weight createWeight(IndexSearcher searcher) throws IOException { throw new UnsupportedOperationException("Query " + this + " does not implement createWeight"); }
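The `createWeight` pair changes nothing but the exception text: the later version includes the query itself in the message, so an `UnsupportedOperationException` seen in a log immediately identifies which `Query` instance was involved. A trivial, hypothetical illustration (`DemoQuery` is not a real Lucene class):

```java
// Hypothetical: an exception that names the failing instance is actionable from the log alone.
public class ExceptionMessageDemo {
  static class DemoQuery {
    @Override public String toString() { return "DemoQuery(field=popularity)"; }
    void createWeight() {
      throw new UnsupportedOperationException("Query " + this + " does not implement createWeight");
    }
  }

  public static void main(String[] args) {
    try {
      new DemoQuery().createWeight();
    } catch (UnsupportedOperationException e) {
      // Prints: Query DemoQuery(field=popularity) does not implement createWeight
      System.out.println(e.getMessage());
    }
  }
}
```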
public void testDerby4095OldTriggerRows() throws SQLException { Statement s = createStatement(); s.executeUpdate("CREATE TABLE APP.TAB (I INT)"); s.executeUpdate("CREATE TABLE APP.LOG (I INT, NAME VARCHAR(30), DELTIME TIMESTAMP)"); s.executeUpdate("CREATE TABLE APP.NAMES(ID INT, NAME VARCHAR(30))"); s.executeUpdate("CREATE TRIGGER APP.MYTRIG AFTER DELETE ON APP.TAB REFERENCING OLD_TABLE AS OLDROWS FOR EACH STATEMENT INSERT INTO APP.LOG(i,name,deltime) SELECT OLDROWS.I, NAMES.NAME, CURRENT_TIMESTAMP FROM --DERBY-PROPERTIES joinOrder=FIXED\n NAMES, OLDROWS --DERBY-PROEPERTIES joinStrategy = NESTEDLOOP\n WHERE (OLDROWS.i = NAMES.ID) AND (1 = 1)"); s.executeUpdate("insert into APP.tab values(1)"); s.executeUpdate("insert into APP.tab values(2)"); s.executeUpdate("insert into APP.tab values(3)"); s.executeUpdate("insert into APP.names values(1,'Charlie')"); s.executeUpdate("insert into APP.names values(2,'Hugh')"); s.executeUpdate("insert into APP.names values(3,'Alex')"); // Now delete a row so we fire the trigger. s.executeUpdate("delete from tab where i = 1"); // Check the log to make sure the trigger fired ok ResultSet loggedDeletes = s.executeQuery("SELECT * FROM APP.LOG"); JDBC.assertDrainResults(loggedDeletes, 1); s.executeUpdate("DROP TABLE APP.TAB"); s.executeUpdate("DROP TABLE APP.LOG"); s.executeUpdate("DROP TABLE APP.NAMES"); }
public void testDerby4095OldTriggerRows() throws SQLException { Statement s = createStatement(); s.executeUpdate("CREATE TABLE APP.TAB (I INT)"); s.executeUpdate("CREATE TABLE APP.LOG (I INT, NAME VARCHAR(30), DELTIME TIMESTAMP)"); s.executeUpdate("CREATE TABLE APP.NAMES(ID INT, NAME VARCHAR(30))"); s.executeUpdate("CREATE TRIGGER APP.MYTRIG AFTER DELETE ON APP.TAB REFERENCING OLD_TABLE AS OLDROWS FOR EACH STATEMENT INSERT INTO APP.LOG(i,name,deltime) SELECT OLDROWS.I, NAMES.NAME, CURRENT_TIMESTAMP FROM --DERBY-PROPERTIES joinOrder=FIXED\n NAMES, OLDROWS --DERBY-PROPERTIES joinStrategy = NESTEDLOOP\n WHERE (OLDROWS.i = NAMES.ID) AND (1 = 1)"); s.executeUpdate("insert into APP.tab values(1)"); s.executeUpdate("insert into APP.tab values(2)"); s.executeUpdate("insert into APP.tab values(3)"); s.executeUpdate("insert into APP.names values(1,'Charlie')"); s.executeUpdate("insert into APP.names values(2,'Hugh')"); s.executeUpdate("insert into APP.names values(3,'Alex')"); // Now delete a row so we fire the trigger. s.executeUpdate("delete from tab where i = 1"); // Check the log to make sure the trigger fired ok ResultSet loggedDeletes = s.executeQuery("SELECT * FROM APP.LOG"); JDBC.assertDrainResults(loggedDeletes, 1); s.executeUpdate("DROP TABLE APP.TAB"); s.executeUpdate("DROP TABLE APP.LOG"); s.executeUpdate("DROP TABLE APP.NAMES"); }
public class RowTriggerExecutor extends GenericTriggerExecutor /* Derby - Class org.apache.derby.impl.sql.execute.RowTriggerExecutor Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package org.apache.derby.impl.sql.execute; import org.apache.derby.iapi.sql.execute.CursorResultSet; import org.apache.derby.iapi.error.StandardException; import org.apache.derby.iapi.sql.dictionary.SPSDescriptor; import org.apache.derby.iapi.sql.dictionary.TriggerDescriptor; import org.apache.derby.iapi.sql.conn.LanguageConnectionContext; import org.apache.derby.iapi.sql.Activation; /** * A row trigger executor is an object that executes * a row trigger. It is instantiated at execution time. * There is one per row trigger. */ class RowTriggerExecutor extends GenericTriggerExecutor { /** * Constructor * * @param tec the execution context * @param triggerd the trigger descriptor * @param activation the activation * @param lcc the lcc */ RowTriggerExecutor ( InternalTriggerExecutionContext tec, TriggerDescriptor triggerd, Activation activation, LanguageConnectionContext lcc ) { super(tec, triggerd, activation, lcc); } /** * Fire the trigger based on the event. * * @param event the trigger event * @param brs the before result set * @param ars the after result set * * @exception StandardExcetion on error or general trigger * exception */ void fireTrigger ( TriggerEvent event, CursorResultSet brs, CursorResultSet ars ) throws StandardException { tec.setTrigger(triggerd); try { while (true) { if (brs != null) { if (brs.getNextRow() == null) break; } if (ars != null) { if (ars.getNextRow() == null) break; } tec.setBeforeResultSet(brs == null ? null : TemporaryRowHolderResultSet. getNewRSOnCurrentRow(activation, brs)); tec.setAfterResultSet(ars == null ? null : TemporaryRowHolderResultSet. getNewRSOnCurrentRow(activation, ars)); /* This is the key to handling autoincrement values that might be seen by insert triggers. For an AFTER ROW trigger, update the autoincrement counters before executing the SPS for the trigger. */ if (event.isAfter()) tec.updateAICounters(); executeSPS(getAction()); /* For BEFORE ROW triggers, update the ai values after the SPS has been executed. This way the SPS will see ai values from the previous row. */ if (event.isBefore()) tec.updateAICounters(); } } finally { clearSPS(); tec.clearTrigger(); } } }
public class RowTriggerExecutor extends GenericTriggerExecutor /* Derby - Class org.apache.derby.impl.sql.execute.RowTriggerExecutor Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package org.apache.derby.impl.sql.execute; import org.apache.derby.iapi.sql.execute.CursorResultSet; import org.apache.derby.iapi.error.StandardException; import org.apache.derby.iapi.sql.dictionary.SPSDescriptor; import org.apache.derby.iapi.sql.dictionary.TriggerDescriptor; import org.apache.derby.iapi.sql.conn.LanguageConnectionContext; import org.apache.derby.iapi.sql.Activation; /** * A row trigger executor is an object that executes * a row trigger. It is instantiated at execution time. * There is one per row trigger. */ public class RowTriggerExecutor extends GenericTriggerExecutor { /** * Constructor * * @param tec the execution context * @param triggerd the trigger descriptor * @param activation the activation * @param lcc the lcc */ RowTriggerExecutor ( InternalTriggerExecutionContext tec, TriggerDescriptor triggerd, Activation activation, LanguageConnectionContext lcc ) { super(tec, triggerd, activation, lcc); } /** * Fire the trigger based on the event. * * @param event the trigger event * @param brs the before result set * @param ars the after result set * * @exception StandardExcetion on error or general trigger * exception */ void fireTrigger ( TriggerEvent event, CursorResultSet brs, CursorResultSet ars ) throws StandardException { tec.setTrigger(triggerd); try { while (true) { if (brs != null) { if (brs.getNextRow() == null) break; } if (ars != null) { if (ars.getNextRow() == null) break; } tec.setBeforeResultSet(brs == null ? null : TemporaryRowHolderResultSet. getNewRSOnCurrentRow(activation, brs)); tec.setAfterResultSet(ars == null ? null : TemporaryRowHolderResultSet. getNewRSOnCurrentRow(activation, ars)); /* This is the key to handling autoincrement values that might be seen by insert triggers. For an AFTER ROW trigger, update the autoincrement counters before executing the SPS for the trigger. */ if (event.isAfter()) tec.updateAICounters(); executeSPS(getAction()); /* For BEFORE ROW triggers, update the ai values after the SPS has been executed. This way the SPS will see ai values from the previous row. */ if (event.isBefore()) tec.updateAICounters(); } } finally { clearSPS(); tec.clearTrigger(); } } }
public class StatementTriggerExecutor extends GenericTriggerExecutor /* Derby - Class org.apache.derby.impl.sql.execute.StatementTriggerExecutor Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package org.apache.derby.impl.sql.execute; import org.apache.derby.iapi.sql.execute.CursorResultSet; import org.apache.derby.iapi.sql.dictionary.TriggerDescriptor; import org.apache.derby.iapi.error.StandardException; import org.apache.derby.iapi.sql.conn.LanguageConnectionContext; import org.apache.derby.iapi.sql.Activation; /** * A statement trigger executor is an object that executes * a statement trigger. It is instantiated at execution * time. There is one per statement trigger. */ class StatementTriggerExecutor extends GenericTriggerExecutor { /** * Constructor * * @param tec the execution context * @param triggerd the trigger descriptor * @param activation the activation * @param lcc the lcc */ StatementTriggerExecutor ( InternalTriggerExecutionContext tec, TriggerDescriptor triggerd, Activation activation, LanguageConnectionContext lcc ) { super(tec, triggerd, activation, lcc); } /** * Fire the trigger based on the event. * * @param event the trigger event * @param brs the before result set * @param ars the after result set * * @exception StandardException on error or general trigger * exception */ void fireTrigger ( TriggerEvent event, CursorResultSet brs, CursorResultSet ars ) throws StandardException { tec.setTrigger(triggerd); tec.setBeforeResultSet(brs); tec.setAfterResultSet(ars); try { executeSPS(getAction()); } finally { clearSPS(); tec.clearTrigger(); } } }
public class StatementTriggerExecutor extends GenericTriggerExecutor /* Derby - Class org.apache.derby.impl.sql.execute.StatementTriggerExecutor Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package org.apache.derby.impl.sql.execute; import org.apache.derby.iapi.sql.execute.CursorResultSet; import org.apache.derby.iapi.sql.dictionary.TriggerDescriptor; import org.apache.derby.iapi.error.StandardException; import org.apache.derby.iapi.sql.conn.LanguageConnectionContext; import org.apache.derby.iapi.sql.Activation; /** * A statement trigger executor is an object that executes * a statement trigger. It is instantiated at execution * time. There is one per statement trigger. */ public class StatementTriggerExecutor extends GenericTriggerExecutor { /** * Constructor * * @param tec the execution context * @param triggerd the trigger descriptor * @param activation the activation * @param lcc the lcc */ StatementTriggerExecutor ( InternalTriggerExecutionContext tec, TriggerDescriptor triggerd, Activation activation, LanguageConnectionContext lcc ) { super(tec, triggerd, activation, lcc); } /** * Fire the trigger based on the event. * * @param event the trigger event * @param brs the before result set * @param ars the after result set * * @exception StandardException on error or general trigger * exception */ void fireTrigger ( TriggerEvent event, CursorResultSet brs, CursorResultSet ars ) throws StandardException { tec.setTrigger(triggerd); tec.setBeforeResultSet(brs); tec.setAfterResultSet(ars); try { executeSPS(getAction()); } finally { clearSPS(); tec.clearTrigger(); } } }
public void stop() { boolean OK = false; if (rawStoreFactory != null) { DaemonService rawStoreDaemon = rawStoreFactory.getDaemon(); if (rawStoreDaemon != null) rawStoreDaemon.stop(); } boolean logBootTrace = PropertyUtil.getSystemBoolean(Property.LOG_BOOT_TRACE); istream.println(LINE); logMsg("\n" + new Date() + MessageService.getTextMessage( MessageId.STORE_SHUTDOWN_MSG, getIdentifier(), getRootDirectory(), // print object and ide of classloader. // Cast to object so we don't get just the toString() // method (Object) this.getClass().getClassLoader())); if (logBootTrace) Monitor.logThrowable(new Throwable("shutdown trace")); if (!isCorrupt) { try { if (pageCache != null && containerCache != null) { pageCache.shutdown(); containerCache.shutdown(); OK = true; } } catch (StandardException se) { se.printStackTrace(istream.getPrintWriter()); } } removeTempDirectory(); if (isReadOnly()) // do enough to close all files, then return { storageFactory.shutdown(); return; } // re-enable stub removal until a better method can be found. // only remove stub if caches are cleaned if (removeStubsOK && OK) removeStubs(); releaseJBMSLockOnDB(); writableStorageFactory.shutdown(); } // end of stop
public void stop() { boolean OK = false; if (rawStoreFactory != null) { DaemonService rawStoreDaemon = rawStoreFactory.getDaemon(); if (rawStoreDaemon != null) rawStoreDaemon.stop(); } boolean logBootTrace = PropertyUtil.getSystemBoolean(Property.LOG_BOOT_TRACE); logMsg(LINE); logMsg("\n" + new Date() + MessageService.getTextMessage( MessageId.STORE_SHUTDOWN_MSG, getIdentifier(), getRootDirectory(), // print object and ide of classloader. // Cast to object so we don't get just the toString() // method (Object) this.getClass().getClassLoader())); if (logBootTrace) Monitor.logThrowable(new Throwable("shutdown trace")); if (!isCorrupt) { try { if (pageCache != null && containerCache != null) { pageCache.shutdown(); containerCache.shutdown(); OK = true; } } catch (StandardException se) { se.printStackTrace(istream.getPrintWriter()); } } removeTempDirectory(); if (isReadOnly()) // do enough to close all files, then return { storageFactory.shutdown(); return; } // re-enable stub removal until a better method can be found. // only remove stub if caches are cleaned if (removeStubsOK && OK) removeStubs(); releaseJBMSLockOnDB(); writableStorageFactory.shutdown(); } // end of stop
public int run(String[] args) throws Exception { addInputOption(); addOutputOption(); addOption("numRecommendations", "n", "Number of recommendations per user", String.valueOf(AggregateAndRecommendReducer.DEFAULT_NUM_RECOMMENDATIONS)); addOption("usersFile", null, "File of users to recommend for", null); addOption("itemsFile", null, "File of items to recommend for", null); addOption("filterFile", "f", "File containing comma-separated userID,itemID pairs. Used to exclude the item from " + "the recommendations for that user (optional)", null); addOption("booleanData", "b", "Treat input as without pref values", Boolean.FALSE.toString()); addOption("maxPrefsPerUser", "mxp", "Maximum number of preferences considered per user in final recommendation phase", String.valueOf(UserVectorSplitterMapper.DEFAULT_MAX_PREFS_PER_USER_CONSIDERED)); addOption("minPrefsPerUser", "mp", "ignore users with less preferences than this in the similarity computation " + "(default: " + DEFAULT_MIN_PREFS_PER_USER + ')', String.valueOf(DEFAULT_MIN_PREFS_PER_USER)); addOption("maxSimilaritiesPerItem", "m", "Maximum number of similarities considered per item ", String.valueOf(DEFAULT_MAX_SIMILARITIES_PER_ITEM)); addOption("maxPrefsPerUserInItemSimilarity", "mppuiis", "max number of preferences to consider per user in the " + "item similarity computation phase, users with more preferences will be sampled down (default: " + DEFAULT_MAX_PREFS_PER_USER + ')', String.valueOf(DEFAULT_MAX_PREFS_PER_USER)); addOption("similarityClassname", "s", "Name of distributed similarity measures class to instantiate, " + "alternatively use one of the predefined similarities (" + VectorSimilarityMeasures.list() + ')'); addOption("threshold", "tr", "discard item pairs with a similarity value below this", false); Map<String,String> parsedArgs = parseArguments(args); if (parsedArgs == null) { return -1; } Path outputPath = getOutputPath(); int numRecommendations = Integer.parseInt(parsedArgs.get("--numRecommendations")); String usersFile = parsedArgs.get("--usersFile"); String itemsFile = parsedArgs.get("--itemsFile"); String filterFile = parsedArgs.get("--filterFile"); boolean booleanData = Boolean.valueOf(parsedArgs.get("--booleanData")); int maxPrefsPerUser = Integer.parseInt(parsedArgs.get("--maxPrefsPerUser")); int minPrefsPerUser = Integer.parseInt(parsedArgs.get("--minPrefsPerUser")); int maxPrefsPerUserInItemSimilarity = Integer.parseInt(parsedArgs.get("--maxPrefsPerUserInItemSimilarity")); int maxSimilaritiesPerItem = Integer.parseInt(parsedArgs.get("--maxSimilaritiesPerItem")); String similarityClassname = parsedArgs.get("--similarityClassname"); double threshold = parsedArgs.containsKey("--threshold") ? 
Double.parseDouble(parsedArgs.get("--threshold")) : RowSimilarityJob.NO_THRESHOLD; Path prepPath = getTempPath("preparePreferenceMatrix"); Path similarityMatrixPath = getTempPath("similarityMatrix"); Path prePartialMultiplyPath1 = getTempPath("prePartialMultiply1"); Path prePartialMultiplyPath2 = getTempPath("prePartialMultiply2"); Path explicitFilterPath = getTempPath("explicitFilterPath"); Path partialMultiplyPath = getTempPath("partialMultiply"); AtomicInteger currentPhase = new AtomicInteger(); int numberOfUsers = -1; if (shouldRunNextPhase(parsedArgs, currentPhase)) { ToolRunner.run(getConf(), new PreparePreferenceMatrixJob(), new String[] { "--input", getInputPath().toString(), "--output", prepPath.toString(), "--maxPrefsPerUser", String.valueOf(maxPrefsPerUserInItemSimilarity), "--minPrefsPerUser", String.valueOf(minPrefsPerUser), "--booleanData", String.valueOf(booleanData), "--tempDir", getTempPath().toString() }); numberOfUsers = TasteHadoopUtils.readInt(new Path(prepPath, PreparePreferenceMatrixJob.NUM_USERS), getConf()); } if (shouldRunNextPhase(parsedArgs, currentPhase)) { /* special behavior if phase 1 is skipped */ if (numberOfUsers == -1) { numberOfUsers = (int) HadoopUtil.countRecords(new Path(prepPath, PreparePreferenceMatrixJob.USER_VECTORS), PathType.LIST, null, getConf()); } /* Once DistributedRowMatrix uses the hadoop 0.20 API, we should refactor this call to something like * new DistributedRowMatrix(...).rowSimilarity(...) */ ToolRunner.run(getConf(), new RowSimilarityJob(), new String[] { "--input", new Path(prepPath, PreparePreferenceMatrixJob.RATING_MATRIX).toString(), "--output", similarityMatrixPath.toString(), "--numberOfColumns", String.valueOf(numberOfUsers), "--similarityClassname", similarityClassname, "--maxSimilaritiesPerRow", String.valueOf(maxSimilaritiesPerItem), "--excludeSelfSimilarity", String.valueOf(Boolean.TRUE), "--threshold", String.valueOf(threshold), "--tempDir", getTempPath().toString() }); } if (shouldRunNextPhase(parsedArgs, currentPhase)) { Job prePartialMultiply1 = prepareJob( similarityMatrixPath, prePartialMultiplyPath1, SequenceFileInputFormat.class, SimilarityMatrixRowWrapperMapper.class, VarIntWritable.class, VectorOrPrefWritable.class, Reducer.class, VarIntWritable.class, VectorOrPrefWritable.class, SequenceFileOutputFormat.class); prePartialMultiply1.waitForCompletion(true); Job prePartialMultiply2 = prepareJob(new Path(prepPath, PreparePreferenceMatrixJob.USER_VECTORS), prePartialMultiplyPath2, SequenceFileInputFormat.class, UserVectorSplitterMapper.class, VarIntWritable.class, VectorOrPrefWritable.class, Reducer.class, VarIntWritable.class, VectorOrPrefWritable.class, SequenceFileOutputFormat.class); if (usersFile != null) { prePartialMultiply2.getConfiguration().set(UserVectorSplitterMapper.USERS_FILE, usersFile); } prePartialMultiply2.getConfiguration().setInt(UserVectorSplitterMapper.MAX_PREFS_PER_USER_CONSIDERED, maxPrefsPerUser); prePartialMultiply2.waitForCompletion(true); Job partialMultiply = prepareJob( new Path(prePartialMultiplyPath1 + "," + prePartialMultiplyPath2), partialMultiplyPath, SequenceFileInputFormat.class, Mapper.class, VarIntWritable.class, VectorOrPrefWritable.class, ToVectorAndPrefReducer.class, VarIntWritable.class, VectorAndPrefsWritable.class, SequenceFileOutputFormat.class); setS3SafeCombinedInputPath(partialMultiply, getTempPath(), prePartialMultiplyPath1, prePartialMultiplyPath2); partialMultiply.waitForCompletion(true); } if (shouldRunNextPhase(parsedArgs, currentPhase)) { /* convert the 
user/item pairs to filter if a filterfile has been specified */ if (filterFile != null) { Job itemFiltering = prepareJob(new Path(filterFile), explicitFilterPath, TextInputFormat.class, ItemFilterMapper.class, VarLongWritable.class, VarLongWritable.class, ItemFilterAsVectorAndPrefsReducer.class, VarIntWritable.class, VectorAndPrefsWritable.class, SequenceFileOutputFormat.class); itemFiltering.waitForCompletion(true); } String aggregateAndRecommendInput = partialMultiplyPath.toString(); if (filterFile != null) { aggregateAndRecommendInput += "," + explicitFilterPath; } Job aggregateAndRecommend = prepareJob( new Path(aggregateAndRecommendInput), outputPath, SequenceFileInputFormat.class, PartialMultiplyMapper.class, VarLongWritable.class, PrefAndSimilarityColumnWritable.class, AggregateAndRecommendReducer.class, VarLongWritable.class, RecommendedItemsWritable.class, TextOutputFormat.class); Configuration aggregateAndRecommendConf = aggregateAndRecommend.getConfiguration(); if (itemsFile != null) { aggregateAndRecommendConf.set(AggregateAndRecommendReducer.ITEMS_FILE, itemsFile); } if (filterFile != null) { setS3SafeCombinedInputPath(aggregateAndRecommend, getTempPath(), partialMultiplyPath, explicitFilterPath); } setIOSort(aggregateAndRecommend); aggregateAndRecommendConf.set(AggregateAndRecommendReducer.ITEMID_INDEX_PATH, new Path(prepPath, PreparePreferenceMatrixJob.ITEMID_INDEX).toString()); aggregateAndRecommendConf.setInt(AggregateAndRecommendReducer.NUM_RECOMMENDATIONS, numRecommendations); aggregateAndRecommendConf.setBoolean(BOOLEAN_DATA, booleanData); aggregateAndRecommend.waitForCompletion(true); } return 0; }
public int run(String[] args) throws Exception { addInputOption(); addOutputOption(); addOption("numRecommendations", "n", "Number of recommendations per user", String.valueOf(AggregateAndRecommendReducer.DEFAULT_NUM_RECOMMENDATIONS)); addOption("usersFile", null, "File of users to recommend for", null); addOption("itemsFile", null, "File of items to recommend for", null); addOption("filterFile", "f", "File containing comma-separated userID,itemID pairs. Used to exclude the item from " + "the recommendations for that user (optional)", null); addOption("booleanData", "b", "Treat input as without pref values", Boolean.FALSE.toString()); addOption("maxPrefsPerUser", "mxp", "Maximum number of preferences considered per user in final recommendation phase", String.valueOf(UserVectorSplitterMapper.DEFAULT_MAX_PREFS_PER_USER_CONSIDERED)); addOption("minPrefsPerUser", "mp", "ignore users with less preferences than this in the similarity computation " + "(default: " + DEFAULT_MIN_PREFS_PER_USER + ')', String.valueOf(DEFAULT_MIN_PREFS_PER_USER)); addOption("maxSimilaritiesPerItem", "m", "Maximum number of similarities considered per item ", String.valueOf(DEFAULT_MAX_SIMILARITIES_PER_ITEM)); addOption("maxPrefsPerUserInItemSimilarity", "mppuiis", "max number of preferences to consider per user in the " + "item similarity computation phase, users with more preferences will be sampled down (default: " + DEFAULT_MAX_PREFS_PER_USER + ')', String.valueOf(DEFAULT_MAX_PREFS_PER_USER)); addOption("similarityClassname", "s", "Name of distributed similarity measures class to instantiate, " + "alternatively use one of the predefined similarities (" + VectorSimilarityMeasures.list() + ')', true); addOption("threshold", "tr", "discard item pairs with a similarity value below this", false); Map<String,String> parsedArgs = parseArguments(args); if (parsedArgs == null) { return -1; } Path outputPath = getOutputPath(); int numRecommendations = Integer.parseInt(parsedArgs.get("--numRecommendations")); String usersFile = parsedArgs.get("--usersFile"); String itemsFile = parsedArgs.get("--itemsFile"); String filterFile = parsedArgs.get("--filterFile"); boolean booleanData = Boolean.valueOf(parsedArgs.get("--booleanData")); int maxPrefsPerUser = Integer.parseInt(parsedArgs.get("--maxPrefsPerUser")); int minPrefsPerUser = Integer.parseInt(parsedArgs.get("--minPrefsPerUser")); int maxPrefsPerUserInItemSimilarity = Integer.parseInt(parsedArgs.get("--maxPrefsPerUserInItemSimilarity")); int maxSimilaritiesPerItem = Integer.parseInt(parsedArgs.get("--maxSimilaritiesPerItem")); String similarityClassname = parsedArgs.get("--similarityClassname"); double threshold = parsedArgs.containsKey("--threshold") ? 
Double.parseDouble(parsedArgs.get("--threshold")) : RowSimilarityJob.NO_THRESHOLD; Path prepPath = getTempPath("preparePreferenceMatrix"); Path similarityMatrixPath = getTempPath("similarityMatrix"); Path prePartialMultiplyPath1 = getTempPath("prePartialMultiply1"); Path prePartialMultiplyPath2 = getTempPath("prePartialMultiply2"); Path explicitFilterPath = getTempPath("explicitFilterPath"); Path partialMultiplyPath = getTempPath("partialMultiply"); AtomicInteger currentPhase = new AtomicInteger(); int numberOfUsers = -1; if (shouldRunNextPhase(parsedArgs, currentPhase)) { ToolRunner.run(getConf(), new PreparePreferenceMatrixJob(), new String[] { "--input", getInputPath().toString(), "--output", prepPath.toString(), "--maxPrefsPerUser", String.valueOf(maxPrefsPerUserInItemSimilarity), "--minPrefsPerUser", String.valueOf(minPrefsPerUser), "--booleanData", String.valueOf(booleanData), "--tempDir", getTempPath().toString() }); numberOfUsers = TasteHadoopUtils.readInt(new Path(prepPath, PreparePreferenceMatrixJob.NUM_USERS), getConf()); } if (shouldRunNextPhase(parsedArgs, currentPhase)) { /* special behavior if phase 1 is skipped */ if (numberOfUsers == -1) { numberOfUsers = (int) HadoopUtil.countRecords(new Path(prepPath, PreparePreferenceMatrixJob.USER_VECTORS), PathType.LIST, null, getConf()); } /* Once DistributedRowMatrix uses the hadoop 0.20 API, we should refactor this call to something like * new DistributedRowMatrix(...).rowSimilarity(...) */ ToolRunner.run(getConf(), new RowSimilarityJob(), new String[] { "--input", new Path(prepPath, PreparePreferenceMatrixJob.RATING_MATRIX).toString(), "--output", similarityMatrixPath.toString(), "--numberOfColumns", String.valueOf(numberOfUsers), "--similarityClassname", similarityClassname, "--maxSimilaritiesPerRow", String.valueOf(maxSimilaritiesPerItem), "--excludeSelfSimilarity", String.valueOf(Boolean.TRUE), "--threshold", String.valueOf(threshold), "--tempDir", getTempPath().toString() }); } if (shouldRunNextPhase(parsedArgs, currentPhase)) { Job prePartialMultiply1 = prepareJob( similarityMatrixPath, prePartialMultiplyPath1, SequenceFileInputFormat.class, SimilarityMatrixRowWrapperMapper.class, VarIntWritable.class, VectorOrPrefWritable.class, Reducer.class, VarIntWritable.class, VectorOrPrefWritable.class, SequenceFileOutputFormat.class); prePartialMultiply1.waitForCompletion(true); Job prePartialMultiply2 = prepareJob(new Path(prepPath, PreparePreferenceMatrixJob.USER_VECTORS), prePartialMultiplyPath2, SequenceFileInputFormat.class, UserVectorSplitterMapper.class, VarIntWritable.class, VectorOrPrefWritable.class, Reducer.class, VarIntWritable.class, VectorOrPrefWritable.class, SequenceFileOutputFormat.class); if (usersFile != null) { prePartialMultiply2.getConfiguration().set(UserVectorSplitterMapper.USERS_FILE, usersFile); } prePartialMultiply2.getConfiguration().setInt(UserVectorSplitterMapper.MAX_PREFS_PER_USER_CONSIDERED, maxPrefsPerUser); prePartialMultiply2.waitForCompletion(true); Job partialMultiply = prepareJob( new Path(prePartialMultiplyPath1 + "," + prePartialMultiplyPath2), partialMultiplyPath, SequenceFileInputFormat.class, Mapper.class, VarIntWritable.class, VectorOrPrefWritable.class, ToVectorAndPrefReducer.class, VarIntWritable.class, VectorAndPrefsWritable.class, SequenceFileOutputFormat.class); setS3SafeCombinedInputPath(partialMultiply, getTempPath(), prePartialMultiplyPath1, prePartialMultiplyPath2); partialMultiply.waitForCompletion(true); } if (shouldRunNextPhase(parsedArgs, currentPhase)) { /* convert the 
user/item pairs to filter if a filterfile has been specified */ if (filterFile != null) { Job itemFiltering = prepareJob(new Path(filterFile), explicitFilterPath, TextInputFormat.class, ItemFilterMapper.class, VarLongWritable.class, VarLongWritable.class, ItemFilterAsVectorAndPrefsReducer.class, VarIntWritable.class, VectorAndPrefsWritable.class, SequenceFileOutputFormat.class); itemFiltering.waitForCompletion(true); } String aggregateAndRecommendInput = partialMultiplyPath.toString(); if (filterFile != null) { aggregateAndRecommendInput += "," + explicitFilterPath; } Job aggregateAndRecommend = prepareJob( new Path(aggregateAndRecommendInput), outputPath, SequenceFileInputFormat.class, PartialMultiplyMapper.class, VarLongWritable.class, PrefAndSimilarityColumnWritable.class, AggregateAndRecommendReducer.class, VarLongWritable.class, RecommendedItemsWritable.class, TextOutputFormat.class); Configuration aggregateAndRecommendConf = aggregateAndRecommend.getConfiguration(); if (itemsFile != null) { aggregateAndRecommendConf.set(AggregateAndRecommendReducer.ITEMS_FILE, itemsFile); } if (filterFile != null) { setS3SafeCombinedInputPath(aggregateAndRecommend, getTempPath(), partialMultiplyPath, explicitFilterPath); } setIOSort(aggregateAndRecommend); aggregateAndRecommendConf.set(AggregateAndRecommendReducer.ITEMID_INDEX_PATH, new Path(prepPath, PreparePreferenceMatrixJob.ITEMID_INDEX).toString()); aggregateAndRecommendConf.setInt(AggregateAndRecommendReducer.NUM_RECOMMENDATIONS, numRecommendations); aggregateAndRecommendConf.setBoolean(BOOLEAN_DATA, booleanData); aggregateAndRecommend.waitForCompletion(true); } return 0; }
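The pair above differs in whether the similarityClassname option is declared as required (the fixed version passes an extra true) and in how the optional threshold is handled. Below is a minimal, generic sketch of the same two ideas using Apache Commons CLI — an assumption on my part, since the job above uses its own addOption/parseArguments helpers that are not shown here: a required option with no default, and an optional threshold that is only read when it was actually supplied.

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;

public final class OptionSketch {
  public static void main(String[] args) throws ParseException {
    Options options = new Options();
    // Required: parsing fails fast if the caller forgets it, instead of yielding null later.
    options.addOption(Option.builder("s").longOpt("similarityClassname")
        .hasArg().required().desc("similarity measure class to use").build());
    // Optional, no default: only consulted when present.
    options.addOption(Option.builder("tr").longOpt("threshold")
        .hasArg().desc("discard pairs below this similarity").build());

    CommandLine cmd = new DefaultParser().parse(options, args);
    String similarityClassname = cmd.getOptionValue("similarityClassname");
    double threshold = cmd.hasOption("threshold")
        ? Double.parseDouble(cmd.getOptionValue("threshold"))
        : Double.NEGATIVE_INFINITY; // stand-in for a "no threshold" sentinel
    System.out.println(similarityClassname + " / " + threshold);
  }
}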
public void testEmptyDirRollback() throws Exception { // Tests that if IW is created over an empty Directory, some documents are // indexed, flushed (but not committed) and then IW rolls back, then no // files are left in the Directory. Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)) .setMaxBufferedDocs(2).setMergePolicy(newLogMergePolicy())); String[] files = dir.listAll(); writer.setInfoStream(VERBOSE ? System.out : null); // Creating over empty dir should not create any files, // or, at most the write.lock file final int extraFileCount; if (files.length == 1) { assertEquals("write.lock", files[0]); extraFileCount = 1; } else { assertEquals(0, files.length); extraFileCount = 0; } Document doc = new Document(); // create as many files as possible doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS)); writer.addDocument(doc); // Adding just one document does not call flush yet. assertEquals("only the stored and term vector files should exist in the directory", 5 + extraFileCount, dir.listAll().length); doc = new Document(); doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS)); writer.addDocument(doc); // The second document should cause a flush. assertTrue("flush should have occurred and files should have been created", dir.listAll().length > 5 + extraFileCount); // After rollback, IW should remove all files writer.rollback(); assertEquals("no files should exist in the directory after rollback", 0, dir.listAll().length); // Since we rolled-back above, that close should be a no-op writer.close(); assertEquals("expected a no-op close after IW.rollback()", 0, dir.listAll().length); dir.close(); }
public void testEmptyDirRollback() throws Exception { // Tests that if IW is created over an empty Directory, some documents are // indexed, flushed (but not committed) and then IW rolls back, then no // files are left in the Directory. Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)) .setMaxBufferedDocs(2).setMergePolicy(newLogMergePolicy())); String[] files = dir.listAll(); writer.setInfoStream(VERBOSE ? System.out : null); // Creating over empty dir should not create any files, // or, at most the write.lock file final int extraFileCount; if (files.length == 1) { assertTrue(files[0].endsWith("write.lock")); extraFileCount = 1; } else { assertEquals(0, files.length); extraFileCount = 0; } Document doc = new Document(); // create as many files as possible doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS)); writer.addDocument(doc); // Adding just one document does not call flush yet. assertEquals("only the stored and term vector files should exist in the directory", 5 + extraFileCount, dir.listAll().length); doc = new Document(); doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS)); writer.addDocument(doc); // The second document should cause a flush. assertTrue("flush should have occurred and files should have been created", dir.listAll().length > 5 + extraFileCount); // After rollback, IW should remove all files writer.rollback(); assertEquals("no files should exist in the directory after rollback", 0, dir.listAll().length); // Since we rolled-back above, that close should be a no-op writer.close(); assertEquals("expected a no-op close after IW.rollback()", 0, dir.listAll().length); dir.close(); }
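A hedged illustration of the assertion change above: when a file name may carry a prefix that varies between runs, asserting on a suffix is more robust than asserting exact equality. The sketch below uses plain JUnit 4 assertions and a made-up file name; it is not taken from the test framework used in the rows above.

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import org.junit.Test;

public class LockFileNameTest {
  @Test
  public void suffixAssertionToleratesPrefixes() {
    // Hypothetical directory listing where the lock file name is decorated with a prefix.
    String[] files = { "lucene-1234567890abcdef-write.lock" };

    // Exact-name assertion (the buggy variant) would fail here:
    // assertEquals("write.lock", files[0]);

    // Suffix assertion (the fixed variant) still identifies the lock file.
    assertTrue(files[0].endsWith("write.lock"));
    assertEquals(1, files.length);
  }
}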
public int run(String[] args) throws Exception { addInputOption(); addOutputOption(); addOption("numRecommendations", "n", "Number of recommendations per user", String.valueOf(AggregateAndRecommendReducer.DEFAULT_NUM_RECOMMENDATIONS)); addOption("usersFile", null, "File of users to recommend for", null); addOption("itemsFile", null, "File of items to recommend for", null); addOption("filterFile", "f", "File containing comma-separated userID,itemID pairs. Used to exclude the item from " + "the recommendations for that user (optional)", null); addOption("booleanData", "b", "Treat input as without pref values", Boolean.FALSE.toString()); addOption("maxPrefsPerUser", "mxp", "Maximum number of preferences considered per user in final recommendation phase", String.valueOf(UserVectorSplitterMapper.DEFAULT_MAX_PREFS_PER_USER_CONSIDERED)); addOption("minPrefsPerUser", "mp", "ignore users with less preferences than this in the similarity computation " + "(default: " + DEFAULT_MIN_PREFS_PER_USER + ')', String.valueOf(DEFAULT_MIN_PREFS_PER_USER)); addOption("maxSimilaritiesPerItem", "m", "Maximum number of similarities considered per item ", String.valueOf(DEFAULT_MAX_SIMILARITIES_PER_ITEM)); addOption("maxPrefsPerUserInItemSimilarity", "mppuiis", "max number of preferences to consider per user in the " + "item similarity computation phase, users with more preferences will be sampled down (default: " + DEFAULT_MAX_PREFS_PER_USER + ')', String.valueOf(DEFAULT_MAX_PREFS_PER_USER)); addOption("similarityClassname", "s", "Name of distributed similarity measures class to instantiate, " + "alternatively use one of the predefined similarities (" + VectorSimilarityMeasures.list() + ')', true); addOption("threshold", "tr", "discard item pairs with a similarity value below this", false); Map<String, List<String>> parsedArgs = parseArguments(args); if (parsedArgs == null) { return -1; } Path outputPath = getOutputPath(); int numRecommendations = Integer.parseInt(getOption("numRecommendations")); String usersFile = getOption("usersFile"); String itemsFile = getOption("itemsFile"); String filterFile = getOption("filterFile"); boolean booleanData = Boolean.valueOf(getOption("booleanData")); int maxPrefsPerUser = Integer.parseInt(getOption("maxPrefsPerUser")); int minPrefsPerUser = Integer.parseInt(getOption("minPrefsPerUser")); int maxPrefsPerUserInItemSimilarity = Integer.parseInt(getOption("maxPrefsPerUserInItemSimilarity")); int maxSimilaritiesPerItem = Integer.parseInt(getOption("maxSimilaritiesPerItem")); String similarityClassname = getOption("similarityClassname"); double threshold = parsedArgs.containsKey("threshold") ? 
Double.parseDouble(getOption("threshold")) : RowSimilarityJob.NO_THRESHOLD; Path prepPath = getTempPath("preparePreferenceMatrix"); Path similarityMatrixPath = getTempPath("similarityMatrix"); Path prePartialMultiplyPath1 = getTempPath("prePartialMultiply1"); Path prePartialMultiplyPath2 = getTempPath("prePartialMultiply2"); Path explicitFilterPath = getTempPath("explicitFilterPath"); Path partialMultiplyPath = getTempPath("partialMultiply"); AtomicInteger currentPhase = new AtomicInteger(); int numberOfUsers = -1; if (shouldRunNextPhase(parsedArgs, currentPhase)) { ToolRunner.run(getConf(), new PreparePreferenceMatrixJob(), new String[]{ "--input", getInputPath().toString(), "--output", prepPath.toString(), "--maxPrefsPerUser", String.valueOf(maxPrefsPerUserInItemSimilarity), "--minPrefsPerUser", String.valueOf(minPrefsPerUser), "--booleanData", String.valueOf(booleanData), "--tempDir", getTempPath().toString()}); numberOfUsers = HadoopUtil.readInt(new Path(prepPath, PreparePreferenceMatrixJob.NUM_USERS), getConf()); } if (shouldRunNextPhase(parsedArgs, currentPhase)) { /* special behavior if phase 1 is skipped */ if (numberOfUsers == -1) { numberOfUsers = (int) HadoopUtil.countRecords(new Path(prepPath, PreparePreferenceMatrixJob.USER_VECTORS), PathType.LIST, null, getConf()); } /* Once DistributedRowMatrix uses the hadoop 0.20 API, we should refactor this call to something like * new DistributedRowMatrix(...).rowSimilarity(...) */ //calculate the co-occurrence matrix ToolRunner.run(getConf(), new RowSimilarityJob(), new String[]{ "--input", new Path(prepPath, PreparePreferenceMatrixJob.RATING_MATRIX).toString(), "--output", similarityMatrixPath.toString(), "--numberOfColumns", String.valueOf(numberOfUsers), "--similarityClassname", similarityClassname, "--maxSimilaritiesPerRow", String.valueOf(maxSimilaritiesPerItem), "--excludeSelfSimilarity", String.valueOf(Boolean.TRUE), "--threshold", String.valueOf(threshold), "--tempDir", getTempPath().toString()}); } //start the multiplication of the co-occurrence matrix by the user vectors if (shouldRunNextPhase(parsedArgs, currentPhase)) { Job prePartialMultiply1 = prepareJob( similarityMatrixPath, prePartialMultiplyPath1, SequenceFileInputFormat.class, SimilarityMatrixRowWrapperMapper.class, VarIntWritable.class, VectorOrPrefWritable.class, Reducer.class, VarIntWritable.class, VectorOrPrefWritable.class, SequenceFileOutputFormat.class); boolean succeeded = prePartialMultiply1.waitForCompletion(true); if (!succeeded) return -1; //continue the multiplication Job prePartialMultiply2 = prepareJob(new Path(prepPath, PreparePreferenceMatrixJob.USER_VECTORS), prePartialMultiplyPath2, SequenceFileInputFormat.class, UserVectorSplitterMapper.class, VarIntWritable.class, VectorOrPrefWritable.class, Reducer.class, VarIntWritable.class, VectorOrPrefWritable.class, SequenceFileOutputFormat.class); if (usersFile != null) { prePartialMultiply2.getConfiguration().set(UserVectorSplitterMapper.USERS_FILE, usersFile); } prePartialMultiply2.getConfiguration().setInt(UserVectorSplitterMapper.MAX_PREFS_PER_USER_CONSIDERED, maxPrefsPerUser); succeeded = prePartialMultiply2.waitForCompletion(true); if (!succeeded) return -1; //finish the job Job partialMultiply = prepareJob( new Path(prePartialMultiplyPath1 + "," + prePartialMultiplyPath2), partialMultiplyPath, SequenceFileInputFormat.class, Mapper.class, VarIntWritable.class, VectorOrPrefWritable.class, ToVectorAndPrefReducer.class, VarIntWritable.class, VectorAndPrefsWritable.class, SequenceFileOutputFormat.class); 
setS3SafeCombinedInputPath(partialMultiply, getTempPath(), prePartialMultiplyPath1, prePartialMultiplyPath2); succeeded = partialMultiply.waitForCompletion(true); if (!succeeded) return -1; } if (shouldRunNextPhase(parsedArgs, currentPhase)) { //filter out any users we don't care about /* convert the user/item pairs to filter if a filterfile has been specified */ if (filterFile != null) { Job itemFiltering = prepareJob(new Path(filterFile), explicitFilterPath, TextInputFormat.class, ItemFilterMapper.class, VarLongWritable.class, VarLongWritable.class, ItemFilterAsVectorAndPrefsReducer.class, VarIntWritable.class, VectorAndPrefsWritable.class, SequenceFileOutputFormat.class); boolean succeeded = itemFiltering.waitForCompletion(true); if (!succeeded) return -1; } String aggregateAndRecommendInput = partialMultiplyPath.toString(); if (filterFile != null) { aggregateAndRecommendInput += "," + explicitFilterPath; } //extract out the recommendations Job aggregateAndRecommend = prepareJob( new Path(aggregateAndRecommendInput), outputPath, SequenceFileInputFormat.class, PartialMultiplyMapper.class, VarLongWritable.class, PrefAndSimilarityColumnWritable.class, AggregateAndRecommendReducer.class, VarLongWritable.class, RecommendedItemsWritable.class, TextOutputFormat.class); Configuration aggregateAndRecommendConf = aggregateAndRecommend.getConfiguration(); if (itemsFile != null) { aggregateAndRecommendConf.set(AggregateAndRecommendReducer.ITEMS_FILE, itemsFile); } if (filterFile != null) { setS3SafeCombinedInputPath(aggregateAndRecommend, getTempPath(), partialMultiplyPath, explicitFilterPath); } setIOSort(aggregateAndRecommend); aggregateAndRecommendConf.set(AggregateAndRecommendReducer.ITEMID_INDEX_PATH, new Path(prepPath, PreparePreferenceMatrixJob.ITEMID_INDEX).toString()); aggregateAndRecommendConf.setInt(AggregateAndRecommendReducer.NUM_RECOMMENDATIONS, numRecommendations); aggregateAndRecommendConf.setBoolean(BOOLEAN_DATA, booleanData); boolean succeeded = aggregateAndRecommend.waitForCompletion(true); if (!succeeded) return -1; } return 0; }
public int run(String[] args) throws Exception { addInputOption(); addOutputOption(); addOption("numRecommendations", "n", "Number of recommendations per user", String.valueOf(AggregateAndRecommendReducer.DEFAULT_NUM_RECOMMENDATIONS)); addOption("usersFile", null, "File of users to recommend for", null); addOption("itemsFile", null, "File of items to recommend for", null); addOption("filterFile", "f", "File containing comma-separated userID,itemID pairs. Used to exclude the item from " + "the recommendations for that user (optional)", null); addOption("booleanData", "b", "Treat input as without pref values", Boolean.FALSE.toString()); addOption("maxPrefsPerUser", "mxp", "Maximum number of preferences considered per user in final recommendation phase", String.valueOf(UserVectorSplitterMapper.DEFAULT_MAX_PREFS_PER_USER_CONSIDERED)); addOption("minPrefsPerUser", "mp", "ignore users with less preferences than this in the similarity computation " + "(default: " + DEFAULT_MIN_PREFS_PER_USER + ')', String.valueOf(DEFAULT_MIN_PREFS_PER_USER)); addOption("maxSimilaritiesPerItem", "m", "Maximum number of similarities considered per item ", String.valueOf(DEFAULT_MAX_SIMILARITIES_PER_ITEM)); addOption("maxPrefsPerUserInItemSimilarity", "mppuiis", "max number of preferences to consider per user in the " + "item similarity computation phase, users with more preferences will be sampled down (default: " + DEFAULT_MAX_PREFS_PER_USER + ')', String.valueOf(DEFAULT_MAX_PREFS_PER_USER)); addOption("similarityClassname", "s", "Name of distributed similarity measures class to instantiate, " + "alternatively use one of the predefined similarities (" + VectorSimilarityMeasures.list() + ')', true); addOption("threshold", "tr", "discard item pairs with a similarity value below this", false); Map<String, List<String>> parsedArgs = parseArguments(args); if (parsedArgs == null) { return -1; } Path outputPath = getOutputPath(); int numRecommendations = Integer.parseInt(getOption("numRecommendations")); String usersFile = getOption("usersFile"); String itemsFile = getOption("itemsFile"); String filterFile = getOption("filterFile"); boolean booleanData = Boolean.valueOf(getOption("booleanData")); int maxPrefsPerUser = Integer.parseInt(getOption("maxPrefsPerUser")); int minPrefsPerUser = Integer.parseInt(getOption("minPrefsPerUser")); int maxPrefsPerUserInItemSimilarity = Integer.parseInt(getOption("maxPrefsPerUserInItemSimilarity")); int maxSimilaritiesPerItem = Integer.parseInt(getOption("maxSimilaritiesPerItem")); String similarityClassname = getOption("similarityClassname"); double threshold = hasOption("threshold") ? 
Double.parseDouble(getOption("threshold")) : RowSimilarityJob.NO_THRESHOLD; Path prepPath = getTempPath("preparePreferenceMatrix"); Path similarityMatrixPath = getTempPath("similarityMatrix"); Path prePartialMultiplyPath1 = getTempPath("prePartialMultiply1"); Path prePartialMultiplyPath2 = getTempPath("prePartialMultiply2"); Path explicitFilterPath = getTempPath("explicitFilterPath"); Path partialMultiplyPath = getTempPath("partialMultiply"); AtomicInteger currentPhase = new AtomicInteger(); int numberOfUsers = -1; if (shouldRunNextPhase(parsedArgs, currentPhase)) { ToolRunner.run(getConf(), new PreparePreferenceMatrixJob(), new String[]{ "--input", getInputPath().toString(), "--output", prepPath.toString(), "--maxPrefsPerUser", String.valueOf(maxPrefsPerUserInItemSimilarity), "--minPrefsPerUser", String.valueOf(minPrefsPerUser), "--booleanData", String.valueOf(booleanData), "--tempDir", getTempPath().toString()}); numberOfUsers = HadoopUtil.readInt(new Path(prepPath, PreparePreferenceMatrixJob.NUM_USERS), getConf()); } if (shouldRunNextPhase(parsedArgs, currentPhase)) { /* special behavior if phase 1 is skipped */ if (numberOfUsers == -1) { numberOfUsers = (int) HadoopUtil.countRecords(new Path(prepPath, PreparePreferenceMatrixJob.USER_VECTORS), PathType.LIST, null, getConf()); } /* Once DistributedRowMatrix uses the hadoop 0.20 API, we should refactor this call to something like * new DistributedRowMatrix(...).rowSimilarity(...) */ //calculate the co-occurrence matrix ToolRunner.run(getConf(), new RowSimilarityJob(), new String[]{ "--input", new Path(prepPath, PreparePreferenceMatrixJob.RATING_MATRIX).toString(), "--output", similarityMatrixPath.toString(), "--numberOfColumns", String.valueOf(numberOfUsers), "--similarityClassname", similarityClassname, "--maxSimilaritiesPerRow", String.valueOf(maxSimilaritiesPerItem), "--excludeSelfSimilarity", String.valueOf(Boolean.TRUE), "--threshold", String.valueOf(threshold), "--tempDir", getTempPath().toString()}); } //start the multiplication of the co-occurrence matrix by the user vectors if (shouldRunNextPhase(parsedArgs, currentPhase)) { Job prePartialMultiply1 = prepareJob( similarityMatrixPath, prePartialMultiplyPath1, SequenceFileInputFormat.class, SimilarityMatrixRowWrapperMapper.class, VarIntWritable.class, VectorOrPrefWritable.class, Reducer.class, VarIntWritable.class, VectorOrPrefWritable.class, SequenceFileOutputFormat.class); boolean succeeded = prePartialMultiply1.waitForCompletion(true); if (!succeeded) return -1; //continue the multiplication Job prePartialMultiply2 = prepareJob(new Path(prepPath, PreparePreferenceMatrixJob.USER_VECTORS), prePartialMultiplyPath2, SequenceFileInputFormat.class, UserVectorSplitterMapper.class, VarIntWritable.class, VectorOrPrefWritable.class, Reducer.class, VarIntWritable.class, VectorOrPrefWritable.class, SequenceFileOutputFormat.class); if (usersFile != null) { prePartialMultiply2.getConfiguration().set(UserVectorSplitterMapper.USERS_FILE, usersFile); } prePartialMultiply2.getConfiguration().setInt(UserVectorSplitterMapper.MAX_PREFS_PER_USER_CONSIDERED, maxPrefsPerUser); succeeded = prePartialMultiply2.waitForCompletion(true); if (!succeeded) return -1; //finish the job Job partialMultiply = prepareJob( new Path(prePartialMultiplyPath1 + "," + prePartialMultiplyPath2), partialMultiplyPath, SequenceFileInputFormat.class, Mapper.class, VarIntWritable.class, VectorOrPrefWritable.class, ToVectorAndPrefReducer.class, VarIntWritable.class, VectorAndPrefsWritable.class, SequenceFileOutputFormat.class); 
setS3SafeCombinedInputPath(partialMultiply, getTempPath(), prePartialMultiplyPath1, prePartialMultiplyPath2); succeeded = partialMultiply.waitForCompletion(true); if (!succeeded) return -1; } if (shouldRunNextPhase(parsedArgs, currentPhase)) { //filter out any users we don't care about /* convert the user/item pairs to filter if a filterfile has been specified */ if (filterFile != null) { Job itemFiltering = prepareJob(new Path(filterFile), explicitFilterPath, TextInputFormat.class, ItemFilterMapper.class, VarLongWritable.class, VarLongWritable.class, ItemFilterAsVectorAndPrefsReducer.class, VarIntWritable.class, VectorAndPrefsWritable.class, SequenceFileOutputFormat.class); boolean succeeded = itemFiltering.waitForCompletion(true); if (!succeeded) return -1; } String aggregateAndRecommendInput = partialMultiplyPath.toString(); if (filterFile != null) { aggregateAndRecommendInput += "," + explicitFilterPath; } //extract out the recommendations Job aggregateAndRecommend = prepareJob( new Path(aggregateAndRecommendInput), outputPath, SequenceFileInputFormat.class, PartialMultiplyMapper.class, VarLongWritable.class, PrefAndSimilarityColumnWritable.class, AggregateAndRecommendReducer.class, VarLongWritable.class, RecommendedItemsWritable.class, TextOutputFormat.class); Configuration aggregateAndRecommendConf = aggregateAndRecommend.getConfiguration(); if (itemsFile != null) { aggregateAndRecommendConf.set(AggregateAndRecommendReducer.ITEMS_FILE, itemsFile); } if (filterFile != null) { setS3SafeCombinedInputPath(aggregateAndRecommend, getTempPath(), partialMultiplyPath, explicitFilterPath); } setIOSort(aggregateAndRecommend); aggregateAndRecommendConf.set(AggregateAndRecommendReducer.ITEMID_INDEX_PATH, new Path(prepPath, PreparePreferenceMatrixJob.ITEMID_INDEX).toString()); aggregateAndRecommendConf.setInt(AggregateAndRecommendReducer.NUM_RECOMMENDATIONS, numRecommendations); aggregateAndRecommendConf.setBoolean(BOOLEAN_DATA, booleanData); boolean succeeded = aggregateAndRecommend.waitForCompletion(true); if (!succeeded) return -1; } return 0; }
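Besides the option handling, the fixed run() above checks the boolean returned by each waitForCompletion(true) call and aborts with -1 as soon as a stage fails. A minimal sketch of that pattern, assuming only the standard org.apache.hadoop.mapreduce.Job API and nothing project-specific:

import java.io.IOException;

import org.apache.hadoop.mapreduce.Job;

public final class JobChain {
  private JobChain() {}

  /** Runs each job in order; returns -1 as soon as one fails, 0 if all succeed. */
  public static int runAll(Job... jobs)
      throws IOException, InterruptedException, ClassNotFoundException {
    for (Job job : jobs) {
      boolean succeeded = job.waitForCompletion(true); // 'true' prints progress to the client
      if (!succeeded) {
        return -1; // propagate the failure instead of silently starting the next stage
      }
    }
    return 0;
  }
}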
private static float[] getFloats(FileFloatSource ffs, IndexReader reader) { float[] vals = new float[reader.maxDoc()]; if (ffs.defVal != 0) { Arrays.fill(vals, ffs.defVal); } InputStream is; String fname = "external_" + ffs.field.getName(); try { is = VersionedFile.getLatestFile(ffs.dataDir, fname); } catch (IOException e) { // log, use defaults SolrCore.log.error("Error opening external value source file: " +e); return vals; } BufferedReader r = new BufferedReader(new InputStreamReader(is)); String idName = StringHelper.intern(ffs.keyField.getName()); FieldType idType = ffs.keyField.getType(); boolean sorted=true; // assume sorted until we discover it's not // warning: lucene's termEnum.skipTo() is not optimized... it simply does a next() // because of this, simply ask the reader for a new termEnum rather than // trying to use skipTo() List<String> notFound = new ArrayList<String>(); int notFoundCount=0; int otherErrors=0; TermDocs termDocs = null; Term protoTerm = new Term(idName, ""); TermEnum termEnum = null; // Number of times to try termEnum.next() before resorting to skip int numTimesNext = 10; char delimiter='='; String termVal; boolean hasNext=true; String prevKey=""; String lastVal="\uFFFF\uFFFF\uFFFF\uFFFF\uFFFF\uFFFF\uFFFF\uFFFF"; try { termDocs = reader.termDocs(); termEnum = reader.terms(protoTerm); Term t = termEnum.term(); if (t != null && t.field() == idName) { // intern'd comparison termVal = t.text(); } else { termVal = lastVal; } for (String line; (line=r.readLine())!=null;) { int delimIndex = line.indexOf(delimiter); if (delimIndex < 0) continue; int endIndex = line.length(); /* EOLs should already be removed for BufferedReader.readLine() for(int endIndex = line.length();endIndex>delimIndex+1; endIndex--) { char ch = line.charAt(endIndex-1); if (ch!='\n' && ch!='\r') break; } */ String key = line.substring(0, delimIndex); String val = line.substring(delimIndex+1, endIndex); String internalKey = idType.toInternal(key); float fval; try { fval=Float.parseFloat(val); } catch (Exception e) { if (++otherErrors<=10) { SolrCore.log.error( "Error loading external value source + fileName + " + e + (otherErrors<10 ? "" : "\tSkipping future errors for this file.") ); } continue; // go to next line in file.. leave values as default. } if (sorted) { // make sure this key is greater than the previous key sorted = internalKey.compareTo(prevKey) >= 0; prevKey = internalKey; if (sorted) { int countNext = 0; for(;;) { int cmp = internalKey.compareTo(termVal); if (cmp == 0) { termDocs.seek(termEnum); while (termDocs.next()) { vals[termDocs.doc()] = fval; } break; } else if (cmp < 0) { // term enum has already advanced past current key... we didn't find it. if (notFoundCount<10) { // collect first 10 not found for logging notFound.add(key); } notFoundCount++; break; } else { // termEnum is less than our current key, so skip ahead // try next() a few times to see if we hit or pass the target. // Lucene's termEnum.skipTo() is currently unoptimized (it just does next()) // so the best thing is to simply ask the reader for a new termEnum(target) // if we really need to skip. if (++countNext > numTimesNext) { termEnum = reader.terms(protoTerm.createTerm(internalKey)); t = termEnum.term(); } else { hasNext = termEnum.next(); t = hasNext ? 
termEnum.term() : null; } if (t != null && t.field() == idName) { // intern'd comparison termVal = t.text(); } else { termVal = lastVal; } } } // end for(;;) } } if (!sorted) { termEnum = reader.terms(protoTerm.createTerm(internalKey)); t = termEnum.term(); if (t != null && t.field() == idName // intern'd comparison && internalKey.equals(t.text())) { termDocs.seek (termEnum); while (termDocs.next()) { vals[termDocs.doc()] = fval; } } else { if (notFoundCount<10) { // collect first 10 not found for logging notFound.add(key); } notFoundCount++; } } } } catch (IOException e) { // log, use defaults SolrCore.log.error("Error loading external value source: " +e); } finally { // swallow exceptions on close so we don't override any // exceptions that happened in the loop if (termDocs!=null) try{termDocs.close();}catch(Exception e){} if (termEnum!=null) try{termEnum.close();}catch(Exception e){} try{r.close();}catch(Exception e){} } SolrCore.log.info("Loaded external value source " + fname + (notFoundCount==0 ? "" : " :"+notFoundCount+" missing keys "+notFound) ); return vals; }
private static float[] getFloats(FileFloatSource ffs, IndexReader reader) { float[] vals = new float[reader.maxDoc()]; if (ffs.defVal != 0) { Arrays.fill(vals, ffs.defVal); } InputStream is; String fname = "external_" + ffs.field.getName(); try { is = VersionedFile.getLatestFile(ffs.dataDir, fname); } catch (IOException e) { // log, use defaults SolrCore.log.error("Error opening external value source file: " +e); return vals; } BufferedReader r = new BufferedReader(new InputStreamReader(is)); String idName = StringHelper.intern(ffs.keyField.getName()); FieldType idType = ffs.keyField.getType(); boolean sorted=true; // assume sorted until we discover it's not // warning: lucene's termEnum.skipTo() is not optimized... it simply does a next() // because of this, simply ask the reader for a new termEnum rather than // trying to use skipTo() List<String> notFound = new ArrayList<String>(); int notFoundCount=0; int otherErrors=0; TermDocs termDocs = null; Term protoTerm = new Term(idName, ""); TermEnum termEnum = null; // Number of times to try termEnum.next() before resorting to skip int numTimesNext = 10; char delimiter='='; String termVal; boolean hasNext=true; String prevKey=""; String lastVal="\uFFFF\uFFFF\uFFFF\uFFFF\uFFFF\uFFFF\uFFFF\uFFFF"; try { termDocs = reader.termDocs(); termEnum = reader.terms(protoTerm); Term t = termEnum.term(); if (t != null && t.field() == idName) { // intern'd comparison termVal = t.text(); } else { termVal = lastVal; } for (String line; (line=r.readLine())!=null;) { int delimIndex = line.lastIndexOf(delimiter); if (delimIndex < 0) continue; int endIndex = line.length(); /* EOLs should already be removed for BufferedReader.readLine() for(int endIndex = line.length();endIndex>delimIndex+1; endIndex--) { char ch = line.charAt(endIndex-1); if (ch!='\n' && ch!='\r') break; } */ String key = line.substring(0, delimIndex); String val = line.substring(delimIndex+1, endIndex); String internalKey = idType.toInternal(key); float fval; try { fval=Float.parseFloat(val); } catch (Exception e) { if (++otherErrors<=10) { SolrCore.log.error( "Error loading external value source + fileName + " + e + (otherErrors<10 ? "" : "\tSkipping future errors for this file.") ); } continue; // go to next line in file.. leave values as default. } if (sorted) { // make sure this key is greater than the previous key sorted = internalKey.compareTo(prevKey) >= 0; prevKey = internalKey; if (sorted) { int countNext = 0; for(;;) { int cmp = internalKey.compareTo(termVal); if (cmp == 0) { termDocs.seek(termEnum); while (termDocs.next()) { vals[termDocs.doc()] = fval; } break; } else if (cmp < 0) { // term enum has already advanced past current key... we didn't find it. if (notFoundCount<10) { // collect first 10 not found for logging notFound.add(key); } notFoundCount++; break; } else { // termEnum is less than our current key, so skip ahead // try next() a few times to see if we hit or pass the target. // Lucene's termEnum.skipTo() is currently unoptimized (it just does next()) // so the best thing is to simply ask the reader for a new termEnum(target) // if we really need to skip. if (++countNext > numTimesNext) { termEnum = reader.terms(protoTerm.createTerm(internalKey)); t = termEnum.term(); } else { hasNext = termEnum.next(); t = hasNext ? 
termEnum.term() : null; } if (t != null && t.field() == idName) { // intern'd comparison termVal = t.text(); } else { termVal = lastVal; } } } // end for(;;) } } if (!sorted) { termEnum = reader.terms(protoTerm.createTerm(internalKey)); t = termEnum.term(); if (t != null && t.field() == idName // intern'd comparison && internalKey.equals(t.text())) { termDocs.seek (termEnum); while (termDocs.next()) { vals[termDocs.doc()] = fval; } } else { if (notFoundCount<10) { // collect first 10 not found for logging notFound.add(key); } notFoundCount++; } } } } catch (IOException e) { // log, use defaults SolrCore.log.error("Error loading external value source: " +e); } finally { // swallow exceptions on close so we don't override any // exceptions that happened in the loop if (termDocs!=null) try{termDocs.close();}catch(Exception e){} if (termEnum!=null) try{termEnum.close();}catch(Exception e){} try{r.close();}catch(Exception e){} } SolrCore.log.info("Loaded external value source " + fname + (notFoundCount==0 ? "" : " :"+notFoundCount+" missing keys "+notFound) ); return vals; }
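The only change between the two versions above is indexOf versus lastIndexOf when locating the '=' delimiter. A small, self-contained illustration of the difference (plain JDK, hypothetical input values): splitting at the last delimiter keeps keys that themselves contain '=' intact, at the cost of not allowing '=' inside the value.

public final class KeyValueLine {
  private KeyValueLine() {}

  /** Returns {key, value}, or null if the line has no delimiter at all. */
  public static String[] split(String line, char delimiter) {
    int delimIndex = line.lastIndexOf(delimiter);
    if (delimIndex < 0) {
      return null;
    }
    return new String[] { line.substring(0, delimIndex), line.substring(delimIndex + 1) };
  }

  public static void main(String[] args) {
    // With "id=42" as the key: indexOf would split after "id"; lastIndexOf keeps "id=42" intact.
    String[] kv = split("id=42=3.5", '=');
    System.out.println(kv[0] + " -> " + kv[1]); // prints: id=42 -> 3.5
  }
}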
public AriesApplicationContext install(AriesApplication app) throws BundleException, ManagementException, ResolverException { if (!app.isResolved()) { app = resolve(app); } // Register an Application Repository for this application Dictionary dict = new Hashtable(); dict.put(ApplicationRepository.REPOSITORY_SCOPE, app.getApplicationMetadata().getApplicationScope()); _bundleContext.registerService(BundleRepository.class.getName(), new ApplicationRepository(_resolver), dict); AriesApplicationContext result = _applicationContextManager.getApplicationContext(app); return result; }
public AriesApplicationContext install(AriesApplication app) throws BundleException, ManagementException, ResolverException { if (!app.isResolved()) { app = resolve(app); } // Register an Application Repository for this application Dictionary dict = new Hashtable(); dict.put(ApplicationRepository.REPOSITORY_SCOPE, app.getApplicationMetadata().getApplicationScope()); _bundleContext.registerService(BundleRepository.class.getName(), new ApplicationRepository(app), dict); AriesApplicationContext result = _applicationContextManager.getApplicationContext(app); return result; }
public UUID getVersion() { return newVersion; } /** * Definitions are serialized as a row with a UUID key, with a magical column named DEFINITION_SCHEMA_COLUMN_NAME * (containing the Avro Schema) and a column per keyspace. Each keyspace column contains a avro.KsDef object * encoded with the Avro schema. */ static RowMutation makeDefinitionMutation(KSMetaData add, KSMetaData remove, UUID versionId) throws IOException { // collect all keyspaces, while removing 'remove' and adding 'add' List<KSMetaData> ksms = new ArrayList<KSMetaData>(); for (String tableName : DatabaseDescriptor.getNonSystemTables()) { if (remove != null && remove.name.equals(tableName) || add != null && add.name.equals(tableName)) continue; ksms.add(DatabaseDescriptor.getTableDefinition(tableName)); } if (add != null) ksms.add(add); // wrap in mutation RowMutation rm = new RowMutation(Table.SYSTEM_TABLE, toUTF8Bytes(versionId)); TimestampClock now = new TimestampClock(System.currentTimeMillis()); // add a column for each keyspace for (KSMetaData ksm : ksms) rm.add(new QueryPath(SCHEMA_CF, null, ksm.name.getBytes(UTF_8)), SerDeUtils.serialize(ksm.deflate()), now); // add the schema rm.add(new QueryPath(SCHEMA_CF, null, DefsTable.DEFINITION_SCHEMA_COLUMN_NAME), org.apache.cassandra.avro.KsDef.SCHEMA$.toString().getBytes(UTF_8), now); return rm; }
public UUID getVersion() { return newVersion; } /** * Definitions are serialized as a row with a UUID key, with a magical column named DEFINITION_SCHEMA_COLUMN_NAME * (containing the Avro Schema) and a column per keyspace. Each keyspace column contains a avro.KsDef object * encoded with the Avro schema. */ static RowMutation makeDefinitionMutation(KSMetaData add, KSMetaData remove, UUID versionId) throws IOException { // collect all keyspaces, while removing 'remove' and adding 'add' List<KSMetaData> ksms = new ArrayList<KSMetaData>(); for (String tableName : DatabaseDescriptor.getNonSystemTables()) { if (remove != null && remove.name.equals(tableName) || add != null && add.name.equals(tableName)) continue; ksms.add(DatabaseDescriptor.getTableDefinition(tableName)); } if (add != null) ksms.add(add); // wrap in mutation RowMutation rm = new RowMutation(Table.SYSTEM_TABLE, toUTF8Bytes(versionId)); TimestampClock now = new TimestampClock(System.currentTimeMillis()); // add a column for each keyspace for (KSMetaData ksm : ksms) rm.add(new QueryPath(SCHEMA_CF, null, ksm.name.getBytes(UTF_8)), SerDeUtils.serialize(ksm.deflate()), now); // add the schema rm.add(new QueryPath(SCHEMA_CF, null, DefsTable.DEFINITION_SCHEMA_COLUMN_NAME), org.apache.cassandra.config.avro.KsDef.SCHEMA$.toString().getBytes(UTF_8), now); return rm; }
public SchemaField getField(String fieldName) { SchemaField f = fields.get(fieldName); if (f != null) return f; for (DynamicField df : dynamicFields) { if (df.matches(fieldName)) return df.makeSchemaField(fieldName); } // Hmmm, default field could also be implemented with a dynamic field of "*". // It would have to be special-cased and only used if nothing else matched. /*** REMOVED -YCS if (defaultFieldType != null) return new SchemaField(fieldName,defaultFieldType); ***/ throw new SolrException(1,"undefined field "+fieldName); }
public SchemaField getField(String fieldName) { SchemaField f = fields.get(fieldName); if (f != null) return f; for (DynamicField df : dynamicFields) { if (df.matches(fieldName)) return df.makeSchemaField(fieldName); } // Hmmm, default field could also be implemented with a dynamic field of "*". // It would have to be special-cased and only used if nothing else matched. /*** REMOVED -YCS if (defaultFieldType != null) return new SchemaField(fieldName,defaultFieldType); ***/ throw new SolrException(400,"undefined field "+fieldName); }
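The fix above replaces a magic error code of 1 with 400, the standard HTTP status for a bad request. As a generic, hypothetical sketch (not the SolrException API used above), the same intent can be expressed with the JDK's HttpURLConnection constant instead of a bare literal:

import java.net.HttpURLConnection;
import java.util.Map;

public final class FieldLookup {
  private FieldLookup() {}

  /** Hypothetical field lookup that reports unknown names as a client (400-style) error. */
  public static String getField(Map<String, String> fields, String fieldName) {
    String value = fields.get(fieldName);
    if (value == null) {
      // HTTP_BAD_REQUEST == 400: the request named a field that does not exist.
      throw new IllegalArgumentException(
          HttpURLConnection.HTTP_BAD_REQUEST + ": undefined field " + fieldName);
    }
    return value;
  }
}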
public void close() throws SQLException { // JDK 1.4 javadoc indicates close on a closed connection is a no-op if (!isClosed() && (rootConnection == this) && (!autoCommit && !transactionIsIdle())) { throw newSQLException( SQLState.LANG_INVALID_TRANSACTION_STATE); } close(exceptionClose); }
public void close() throws SQLException { // JDK 1.4 javadoc indicates close on a closed connection is a no-op if (!isClosed() && (rootConnection == this) && (!autoCommit && !transactionIsIdle())) { throw newSQLException( SQLState.CANNOT_CLOSE_ACTIVE_CONNECTION); } close(exceptionClose); }
public URL toURL() throws MalformedURLException { String entryURL = "jar:" + url + "!/" + getParent().getName() + getName(); URL result = new URL(entryURL); return result; }
public URL toURL() throws MalformedURLException { String entryURL = "jar:" + url + "!/" + getName(); URL result = new URL(entryURL); return result; }
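The corrected toURL() above builds the entry URL from the entry's own name only. For reference, the general shape of a jar: URL is jar:<url-of-the-jar>!/<entry-path-from-the-jar-root>; the sketch below (plain JDK, with a hypothetical jar path) just shows that form.

import java.net.MalformedURLException;
import java.net.URL;

public final class JarEntryUrl {
  private JarEntryUrl() {}

  /** Builds a jar: URL for one entry inside the given jar. */
  public static URL toUrl(URL jarUrl, String entryName) throws MalformedURLException {
    return new URL("jar:" + jarUrl + "!/" + entryName);
  }

  public static void main(String[] args) throws MalformedURLException {
    // Hypothetical jar location and entry name, used only to show the resulting URL shape.
    URL jar = new URL("file:/tmp/sample.jar");
    System.out.println(toUrl(jar, "META-INF/MANIFEST.MF"));
    // prints: jar:file:/tmp/sample.jar!/META-INF/MANIFEST.MF
  }
}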
public void lockRecordForRead( Latch latch, RecordHandle record, boolean forUpdate) throws StandardException; /** Request a write lock which will be released immediately upon grant. @param t Transaction to associate lock with. @param record Record to lock. @param lockForInsertPreviouskey Lock is for a previous key of a insert. @param waitForLock Should lock request wait until granted?
public void lockRecordForRead( Latch latch, RecordHandle record, boolean forUpdate) throws StandardException; /** Request a write lock which will be released immediately upon grant. @param t Transaction to associate lock with. @param record Record to lock. @param lockForPreviousKey Lock is for a previous key of a insert. @param waitForLock Should lock request wait until granted?
public Term term() { return /*term == null ? null :*/ term.getTerm(); }
public Term term() { return term == null ? null : term.getTerm(); }
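The one-line fix above guards against a null term before calling getTerm(). A tiny generic helper showing the same null-propagating pattern (plain JDK; the names are illustrative, not taken from the code above):

import java.util.function.Function;

public final class NullSafe {
  private NullSafe() {}

  /** Returns getter.apply(value), or null when value itself is null. */
  public static <T, R> R mapOrNull(T value, Function<T, R> getter) {
    return value == null ? null : getter.apply(value);
  }

  public static void main(String[] args) {
    String present = "hello";
    String absent = null;
    System.out.println(mapOrNull(present, String::length)); // 5
    System.out.println(mapOrNull(absent, String::length));  // null, no NullPointerException
  }
}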
public static void testAfter() throws IOException { // create an index RAMDirectory indexStore = new RAMDirectory(); IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(), true); long now = System.currentTimeMillis(); Document doc = new Document(); // add time that is in the future doc.add(Field.Keyword("datefield", DateField.timeToString(now - 888888))); doc.add(Field.Text("body", "Today is a very sunny day in New York City")); writer.addDocument(doc); writer.optimize(); writer.close(); IndexSearcher searcher = new IndexSearcher(indexStore); // filter that should preserve matches DateFilter df1 = DateFilter.After("datefield", now); // filter that should discard matches DateFilter df2 = DateFilter.After("datefield", now + 999999); // search something that doesn't exist with DateFilter Query query1 = new TermQuery(new Term("body", "NoMatchForThis")); // search for something that does exists Query query2 = new TermQuery(new Term("body", "sunny")); Hits result; // ensure that queries return expected results without DateFilter first result = searcher.search(query1); assertEquals(0, result.length()); result = searcher.search(query2); assertEquals(1, result.length()); // run queries with DateFilter result = searcher.search(query1, df1); assertEquals(0, result.length()); result = searcher.search(query1, df2); assertEquals(0, result.length()); result = searcher.search(query2, df1); assertEquals(1, result.length()); result = searcher.search(query2, df2); assertEquals(0, result.length()); }
public static void testAfter() throws IOException { // create an index RAMDirectory indexStore = new RAMDirectory(); IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(), true); long now = System.currentTimeMillis(); Document doc = new Document(); // add time that is in the future doc.add(Field.Keyword("datefield", DateField.timeToString(now + 888888))); doc.add(Field.Text("body", "Today is a very sunny day in New York City")); writer.addDocument(doc); writer.optimize(); writer.close(); IndexSearcher searcher = new IndexSearcher(indexStore); // filter that should preserve matches DateFilter df1 = DateFilter.After("datefield", now); // filter that should discard matches DateFilter df2 = DateFilter.After("datefield", now + 999999); // search something that doesn't exist with DateFilter Query query1 = new TermQuery(new Term("body", "NoMatchForThis")); // search for something that does exists Query query2 = new TermQuery(new Term("body", "sunny")); Hits result; // ensure that queries return expected results without DateFilter first result = searcher.search(query1); assertEquals(0, result.length()); result = searcher.search(query2); assertEquals(1, result.length()); // run queries with DateFilter result = searcher.search(query1, df1); assertEquals(0, result.length()); result = searcher.search(query1, df2); assertEquals(0, result.length()); result = searcher.search(query2, df1); assertEquals(1, result.length()); result = searcher.search(query2, df2); assertEquals(0, result.length()); }
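The test fix above only flips a sign: the indexed time must be now + 888888 so that it really lies in the future, matching both the comment and the After filter whose cutoff is now. A trivial stand-alone check of that arithmetic (plain JDK, constants chosen to mirror the test):

public final class CutoffCheck {
  public static void main(String[] args) {
    long now = System.currentTimeMillis();
    long future = now + 888888; // what the comment "time that is in the future" intends
    long past = now - 888888;   // what the buggy version actually indexed
    System.out.println("future is after the cutoff: " + (future > now)); // true
    System.out.println("past is after the cutoff:   " + (past > now));   // false
  }
}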
private int getConnFromDatabaseName() throws DRDAProtocolException { Properties p = new Properties(); databaseAccessException = null; //if we haven't got the correlation token yet, use session number for drdaID if (session.drdaID == null) session.drdaID = leftBrace + session.connNum + rightBrace; p.put(Attribute.DRDAID_ATTR, session.drdaID); // We pass extra property information for the authentication provider // to successfully re-compute the substitute (hashed) password and // compare it with what we've got from the requester (source). // // If a password attribute appears as part of the connection URL // attributes, we then don't use the substitute hashed password // to authenticate with the engine _as_ the one (if any) as part // of the connection URL attributes, will be used to authenticate // against Derby's BUILT-IN authentication provider - As a reminder, // Derby allows password to be mentioned as part of the connection // URL attributes, as this extra capability could be useful to pass // passwords to external authentication providers for Derby; hence // a password defined as part of the connection URL attributes cannot // be substituted (single-hashed) as it is not recoverable. if ((database.securityMechanism == CodePoint.SECMEC_USRSSBPWD) && (database.dbName.indexOf(Attribute.PASSWORD_ATTR) == -1)) { p.put(Attribute.CLIENT_SECURITY_MECHANISM, String.valueOf(database.securityMechanism)); p.put(Attribute.DRDA_SECTKN_IN, DecryptionManager.toHexString(database.secTokenIn, 0, database.secTokenIn.length)); p.put(Attribute.DRDA_SECTKN_OUT, DecryptionManager.toHexString(database.secTokenOut, 0, database.secTokenOut.length)); } try { database.makeConnection(p); } catch (SQLException se) { String sqlState = se.getSQLState(); // need to set the security check code based on the reason the connection // was denied, Derby doesn't say whether the userid or password caused // the problem, so we will just return userid invalid databaseAccessException = se; for (; se != null; se = se.getNextException()) { if (SanityManager.DEBUG) trace(se.getMessage()); println2Log(database.dbName, session.drdaID, se.getMessage()); } if (sqlState.regionMatches(0,SQLState.LOGIN_FAILED,0,5)) return CodePoint.SECCHKCD_USERIDINVALID; return 0; } catch (Exception e) { // If Derby has shut down for some reason, // we will send an agent error and then try to // get the driver loaded again. We have to get // rid of the client first in case they are holding // the DriverManager lock. println2Log(database.dbName, session.drdaID, "Driver not loaded" + e.getMessage()); try { agentError("Driver not loaded"); } catch (DRDAProtocolException dpe) { // Retry starting the server before rethrowing // the protocol exception. Then hopfully all // will be well when they try again. try { server.startNetworkServer(); } catch (Exception re) { println2Log(database.dbName, session.drdaID, "Failed attempt to reload driver " +re.getMessage() ); } throw dpe; } } // Everything worked so log connection to the database. if (getLogConnections()) println2Log(database.dbName, session.drdaID, "Apache Derby Network Server connected to database " + database.dbName); return 0; }
private int getConnFromDatabaseName() throws DRDAProtocolException { Properties p = new Properties(); databaseAccessException = null; //if we haven't got the correlation token yet, use session number for drdaID if (session.drdaID == null) session.drdaID = leftBrace + session.connNum + rightBrace; p.put(Attribute.DRDAID_ATTR, session.drdaID); // We pass extra property information for the authentication provider // to successfully re-compute the substitute (hashed) password and // compare it with what we've got from the requester (source). // // If a password attribute appears as part of the connection URL // attributes, we then don't use the substitute hashed password // to authenticate with the engine _as_ the one (if any) as part // of the connection URL attributes, will be used to authenticate // against Derby's BUILT-IN authentication provider - As a reminder, // Derby allows password to be mentioned as part of the connection // URL attributes, as this extra capability could be useful to pass // passwords to external authentication providers for Derby; hence // a password defined as part of the connection URL attributes cannot // be substituted (single-hashed) as it is not recoverable. if ((database.securityMechanism == CodePoint.SECMEC_USRSSBPWD) && (database.dbName.indexOf(Attribute.PASSWORD_ATTR) == -1)) { p.put(Attribute.DRDA_SECMEC, String.valueOf(database.securityMechanism)); p.put(Attribute.DRDA_SECTKN_IN, DecryptionManager.toHexString(database.secTokenIn, 0, database.secTokenIn.length)); p.put(Attribute.DRDA_SECTKN_OUT, DecryptionManager.toHexString(database.secTokenOut, 0, database.secTokenOut.length)); } try { database.makeConnection(p); } catch (SQLException se) { String sqlState = se.getSQLState(); // need to set the security check code based on the reason the connection // was denied, Derby doesn't say whether the userid or password caused // the problem, so we will just return userid invalid databaseAccessException = se; for (; se != null; se = se.getNextException()) { if (SanityManager.DEBUG) trace(se.getMessage()); println2Log(database.dbName, session.drdaID, se.getMessage()); } if (sqlState.regionMatches(0,SQLState.LOGIN_FAILED,0,5)) return CodePoint.SECCHKCD_USERIDINVALID; return 0; } catch (Exception e) { // If Derby has shut down for some reason, // we will send an agent error and then try to // get the driver loaded again. We have to get // rid of the client first in case they are holding // the DriverManager lock. println2Log(database.dbName, session.drdaID, "Driver not loaded" + e.getMessage()); try { agentError("Driver not loaded"); } catch (DRDAProtocolException dpe) { // Retry starting the server before rethrowing // the protocol exception. Then hopfully all // will be well when they try again. try { server.startNetworkServer(); } catch (Exception re) { println2Log(database.dbName, session.drdaID, "Failed attempt to reload driver " +re.getMessage() ); } throw dpe; } } // Everything worked so log connection to the database. if (getLogConnections()) println2Log(database.dbName, session.drdaID, "Apache Derby Network Server connected to database " + database.dbName); return 0; }
public boolean authenticateUser(String userName, String userPassword, String databaseName, Properties info ) { // Client security mechanism if any specified // Note: Right now it is only used to handle clients authenticating // via DRDA SECMEC_USRSSBPWD mechanism String clientSecurityMechanism = null; // Client security mechanism (if any) short representation // Default value is none. int secMec = 0; // let's check if the user has been defined as a valid user of the // JBMS system. // We expect to find and match a System property corresponding to the // credentials passed-in. // if (userName == null) // We don't tolerate 'guest' user for now. return false; String definedUserPassword = null, passedUserPassword = null; // If a security mechanism is specified as part of the connection // properties, it indicates that we've to account as far as how the // password is presented to us - in the case of SECMEC_USRSSBPWD // (only expected one at the moment), the password is a substitute // one which has already been hashed differently than what we store // at the database level (for instance) - this will influence how we // assess the substitute password to be legitimate for Derby's // BUILTIN authentication scheme/provider. if ((clientSecurityMechanism = info.getProperty(Attribute.CLIENT_SECURITY_MECHANISM)) != null) { secMec = Integer.parseInt(clientSecurityMechanism); } // // Check if user has been defined at the database or/and // system level. The user (administrator) can configure it the // way he/she wants (as well as forcing users properties to // be retrieved at the datbase level only). // String userNameProperty = org.apache.derby.iapi.reference.Property.USER_PROPERTY_PREFIX.concat( userName); // check if user defined at the database level definedUserPassword = getDatabaseProperty(userNameProperty); if (definedUserPassword != null) { if (secMec != SECMEC_USRSSBPWD) { // encrypt passed-in password passedUserPassword = encryptPassword(userPassword); } else { // Dealing with a client SECMEC - password checking is // slightly different and we need to generate a // password substitute to compare with the substitute // generated one from the client. definedUserPassword = substitutePassword(userName, definedUserPassword, info, true); // As SecMec is SECMEC_USRSSBPWD, expected passed-in password // to be HexString'ified already passedUserPassword = userPassword; } } else { // check if user defined at the system level definedUserPassword = getSystemProperty(userNameProperty); passedUserPassword = userPassword; if ((definedUserPassword != null) && (secMec == SECMEC_USRSSBPWD)) { // Dealing with a client SECMEC - see above comments definedUserPassword = substitutePassword(userName, definedUserPassword, info, false); } } if (definedUserPassword == null) // no such user found return false; // check if the passwords match if (!definedUserPassword.equals(passedUserPassword)) return false; // NOTE: We do not look at the passed-in database name value as // we rely on the authorization service that was put in // in 2.0 . (if a database name was passed-in) // We do have a valid user return true; }
public boolean authenticateUser(String userName, String userPassword, String databaseName, Properties info ) { // Client security mechanism if any specified // Note: Right now it is only used to handle clients authenticating // via DRDA SECMEC_USRSSBPWD mechanism String clientSecurityMechanism = null; // Client security mechanism (if any) short representation // Default value is none. int secMec = 0; // let's check if the user has been defined as a valid user of the // JBMS system. // We expect to find and match a System property corresponding to the // credentials passed-in. // if (userName == null) // We don't tolerate 'guest' user for now. return false; String definedUserPassword = null, passedUserPassword = null; // If a security mechanism is specified as part of the connection // properties, it indicates that we've to account as far as how the // password is presented to us - in the case of SECMEC_USRSSBPWD // (only expected one at the moment), the password is a substitute // one which has already been hashed differently than what we store // at the database level (for instance) - this will influence how we // assess the substitute password to be legitimate for Derby's // BUILTIN authentication scheme/provider. if ((clientSecurityMechanism = info.getProperty(Attribute.DRDA_SECMEC)) != null) { secMec = Integer.parseInt(clientSecurityMechanism); } // // Check if user has been defined at the database or/and // system level. The user (administrator) can configure it the // way he/she wants (as well as forcing users properties to // be retrieved at the datbase level only). // String userNameProperty = org.apache.derby.iapi.reference.Property.USER_PROPERTY_PREFIX.concat( userName); // check if user defined at the database level definedUserPassword = getDatabaseProperty(userNameProperty); if (definedUserPassword != null) { if (secMec != SECMEC_USRSSBPWD) { // encrypt passed-in password passedUserPassword = encryptPassword(userPassword); } else { // Dealing with a client SECMEC - password checking is // slightly different and we need to generate a // password substitute to compare with the substitute // generated one from the client. definedUserPassword = substitutePassword(userName, definedUserPassword, info, true); // As SecMec is SECMEC_USRSSBPWD, expected passed-in password // to be HexString'ified already passedUserPassword = userPassword; } } else { // check if user defined at the system level definedUserPassword = getSystemProperty(userNameProperty); passedUserPassword = userPassword; if ((definedUserPassword != null) && (secMec == SECMEC_USRSSBPWD)) { // Dealing with a client SECMEC - see above comments definedUserPassword = substitutePassword(userName, definedUserPassword, info, false); } } if (definedUserPassword == null) // no such user found return false; // check if the passwords match if (!definedUserPassword.equals(passedUserPassword)) return false; // NOTE: We do not look at the passed-in database name value as // we rely on the authorization service that was put in // in 2.0 . (if a database name was passed-in) // We do have a valid user return true; }
public void onAlive(InetAddress endpoint, EndpointState state) { if (!isClientMode && state.hasToken()) deliverHints(endpoint); }
public void onAlive(InetAddress endpoint, EndpointState state) { if (!isClientMode && StorageService.instance.getTokenMetadata().isMember(endpoint)) deliverHints(endpoint); }
protected Row getReduced() { Comparator<IColumn> colComparator = QueryFilter.getColumnComparator(comparator); Iterator<IColumn> colCollated = IteratorUtils.collatedIterator(colComparator, colIters); ColumnFamily returnCF = null; // First check if this row is in the rowCache. If it is we can skip the rest ColumnFamily cached = cfs.getRawCachedRow(key); if (cached != null) { QueryFilter keyFilter = new QueryFilter(key, filter.path, filter.filter); returnCF = cfs.filterColumnFamily(cached, keyFilter, cfs.metadata.gcGraceSeconds); } else { returnCF = firstMemtable.getColumnFamily(key); // TODO this is a little subtle: the Memtable ColumnIterator has to be a shallow clone of the source CF, // with deletion times set correctly, so we can use it as the "base" CF to add query results to. // (for sstable ColumnIterators we do not care if it is a shallow clone or not.) returnCF = returnCF == null ? ColumnFamily.create(firstMemtable.getTableName(), filter.getColumnFamilyName()) : returnCF.cloneMeShallow(); if (colCollated.hasNext()) { filter.collectCollatedColumns(returnCF, colCollated, gcBefore); } else { returnCF = null; } } Row rv = new Row(key, returnCF); colIters.clear(); key = null; return rv; } }; return new RowIterator(reduced, iterators); }
protected Row getReduced() { Comparator<IColumn> colComparator = filter.filter.getColumnComparator(comparator); Iterator<IColumn> colCollated = IteratorUtils.collatedIterator(colComparator, colIters); ColumnFamily returnCF = null; // First check if this row is in the rowCache. If it is we can skip the rest ColumnFamily cached = cfs.getRawCachedRow(key); if (cached != null) { QueryFilter keyFilter = new QueryFilter(key, filter.path, filter.filter); returnCF = cfs.filterColumnFamily(cached, keyFilter, cfs.metadata.gcGraceSeconds); } else { returnCF = firstMemtable.getColumnFamily(key); // TODO this is a little subtle: the Memtable ColumnIterator has to be a shallow clone of the source CF, // with deletion times set correctly, so we can use it as the "base" CF to add query results to. // (for sstable ColumnIterators we do not care if it is a shallow clone or not.) returnCF = returnCF == null ? ColumnFamily.create(firstMemtable.getTableName(), filter.getColumnFamilyName()) : returnCF.cloneMeShallow(); if (colCollated.hasNext()) { filter.collectCollatedColumns(returnCF, colCollated, gcBefore); } else { returnCF = null; } } Row rv = new Row(key, returnCF); colIters.clear(); key = null; return rv; } }; return new RowIterator(reduced, iterators); }
public void testQuestionmark() throws IOException { RAMDirectory indexStore = new RAMDirectory(); IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(), true); Document doc1 = new Document(); Document doc2 = new Document(); Document doc3 = new Document(); Document doc4 = new Document(); doc1.add(Field.Text("body", "metal")); doc2.add(Field.Text("body", "metals")); doc3.add(Field.Text("body", "mXtals")); doc4.add(Field.Text("body", "mXtXls")); writer.addDocument(doc1); writer.addDocument(doc2); writer.addDocument(doc3); writer.addDocument(doc4); writer.optimize(); IndexSearcher searcher = new IndexSearcher(indexStore); Query query1 = new TermQuery(new Term("body", "m?tal")); // 1 Query query2 = new WildcardQuery(new Term("body", "metal?")); // 2 Query query3 = new WildcardQuery(new Term("body", "metals?")); // 1 Query query4 = new WildcardQuery(new Term("body", "m?t?ls")); // 3 Hits result; result = searcher.search(query1); assertEquals(1, result.length()); result = searcher.search(query2); assertEquals(2, result.length()); result = searcher.search(query3); assertEquals(1, result.length()); result = searcher.search(query4); assertEquals(3, result.length()); writer.close(); }
public void testQuestionmark() throws IOException { RAMDirectory indexStore = new RAMDirectory(); IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(), true); Document doc1 = new Document(); Document doc2 = new Document(); Document doc3 = new Document(); Document doc4 = new Document(); doc1.add(Field.Text("body", "metal")); doc2.add(Field.Text("body", "metals")); doc3.add(Field.Text("body", "mXtals")); doc4.add(Field.Text("body", "mXtXls")); writer.addDocument(doc1); writer.addDocument(doc2); writer.addDocument(doc3); writer.addDocument(doc4); writer.optimize(); IndexSearcher searcher = new IndexSearcher(indexStore); Query query1 = new WildcardQuery(new Term("body", "m?tal")); // 1 Query query2 = new WildcardQuery(new Term("body", "metal?")); // 2 Query query3 = new WildcardQuery(new Term("body", "metals?")); // 1 Query query4 = new WildcardQuery(new Term("body", "m?t?ls")); // 3 Hits result; result = searcher.search(query1); assertEquals(1, result.length()); result = searcher.search(query2); assertEquals(2, result.length()); result = searcher.search(query3); assertEquals(1, result.length()); result = searcher.search(query4); assertEquals(3, result.length()); writer.close(); }
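// Editor's note (illustrative sketch, not part of the dataset): the fix above swaps
// TermQuery for WildcardQuery when building query1, because a TermQuery matches the
// literal token "m?tal" while WildcardQuery expands '?' to any single character.
// The class names below are the standard Lucene ones already used in these tests;
// the field name and helper class are hypothetical.
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.WildcardQuery;

class WildcardVsTermSketch {
    static Query literal()  { return new TermQuery(new Term("body", "m?tal"));     } // matches only the literal token "m?tal"
    static Query wildcard() { return new WildcardQuery(new Term("body", "m?tal")); } // matches "metal", "mXtal", ...
}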
public static void beforeClass() throws Exception { initCore("solrConfig.xml", "schema.xml"); numberOfDocs = 0; for (String[] doc : DOCUMENTS) { assertNull(h.validateUpdate(adoc("id", Integer.toString(numberOfDocs), "url", doc[0], "title", doc[1], "snippet", doc[2]))); numberOfDocs++; } assertNull(h.validateUpdate(commit())); }
public static void beforeClass() throws Exception { initCore("solrconfig.xml", "schema.xml"); numberOfDocs = 0; for (String[] doc : DOCUMENTS) { assertNull(h.validateUpdate(adoc("id", Integer.toString(numberOfDocs), "url", doc[0], "title", doc[1], "snippet", doc[2]))); numberOfDocs++; } assertNull(h.validateUpdate(commit())); }
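// Editor's note (illustrative sketch): the fix above only lowercases the config name to
// "solrconfig.xml" -- resource names are case-sensitive on the classpath (inside jars)
// and on case-sensitive filesystems, so the mis-cased name can fail to resolve.
// A defensive lookup might look like this; class and resource names are hypothetical.
import java.io.InputStream;

class ResourceNameSketch {
    static InputStream open(String name) {
        InputStream in = ResourceNameSketch.class.getClassLoader().getResourceAsStream(name);
        if (in == null) {
            throw new IllegalArgumentException("Resource not found (check case): " + name);
        }
        return in;
    }
}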
public static AbstractType getComparator(String tableName, String cfName) { assert tableName != null; CFMetaData cfmd = getCFMetaData(tableName, cfName); if (cfmd == null) throw new NullPointerException("Unknown ColumnFamily " + cfName + " in keyspace " + tableName); return cfmd.comparator; }
public static AbstractType getComparator(String tableName, String cfName) { assert tableName != null; CFMetaData cfmd = getCFMetaData(tableName, cfName); if (cfmd == null) throw new IllegalArgumentException("Unknown ColumnFamily " + cfName + " in keyspace " + tableName); return cfmd.comparator; }
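// Editor's note (illustrative sketch): the fix above throws IllegalArgumentException
// instead of a hand-thrown NullPointerException when the lookup fails -- the caller
// passed an unknown name, nothing actually dereferenced null. Same pattern in
// miniature; the map and names below are hypothetical.
import java.util.Map;

class LookupSketch {
    static <V> V require(Map<String, V> registry, String name) {
        V value = registry.get(name);
        if (value == null) {
            throw new IllegalArgumentException("Unknown entry: " + name);
        }
        return value;
    }
}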
public ChunkedWriter(Configuration conf, int chunkSizeInMB, Path output) throws IOException { this.output = output; this.conf = conf; if (chunkSizeInMB > 1984) { chunkSizeInMB = 1984; } maxChunkSizeInBytes = chunkSizeInMB * 1024 * 1024; fs = FileSystem.get(conf); currentChunkID = 0; writer = new SequenceFile.Writer(fs, conf, getPath(currentChunkID), Text.class, Text.class); }
public ChunkedWriter(Configuration conf, int chunkSizeInMB, Path output) throws IOException { this.output = output; this.conf = conf; if (chunkSizeInMB > 1984) { chunkSizeInMB = 1984; } maxChunkSizeInBytes = chunkSizeInMB * 1024 * 1024; fs = FileSystem.get(output.toUri(), conf); currentChunkID = 0; writer = new SequenceFile.Writer(fs, conf, getPath(currentChunkID), Text.class, Text.class); }
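// Editor's note (illustrative sketch): the fix above resolves the FileSystem from the
// output path's own URI instead of the default filesystem configured in `conf`, so a
// fully qualified output path (different scheme or authority) is honoured. Both calls
// below are standard Hadoop APIs; the helper class and variable names are hypothetical.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

class FileSystemForPathSketch {
    static FileSystem forPath(Path output, Configuration conf) throws IOException {
        // Equivalent convenience form: output.getFileSystem(conf)
        return FileSystem.get(output.toUri(), conf);
    }
}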
public static final CharacterRunAutomaton SIMPLE = new CharacterRunAutomaton(new RegExp("[A-Za-zªµºÀ-ÖØ-öø-Z]+").toAutomaton());
public static final CharacterRunAutomaton SIMPLE = new CharacterRunAutomaton(new RegExp("[A-Za-zªµºÀ-ÖØ-öø-ˁ]+").toAutomaton());
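// Editor's note (illustrative sketch): the bug above is an inverted character range --
// 'ø' (U+00F8) sorts after 'Z' (U+005A), so "ø-Z" cannot be the intended range; the fix
// ends the class at 'ˁ' (U+02C1) instead. Lucene's RegExp is its own engine, but
// java.util.regex (used here purely for illustration) rejects such a range outright.
import java.util.regex.Pattern;
import java.util.regex.PatternSyntaxException;

class InvertedRangeSketch {
    public static void main(String[] args) {
        try {
            Pattern.compile("[\u00F8-Z]");                      // start > end: illegal character range
        } catch (PatternSyntaxException expected) {
            System.out.println("inverted range rejected: " + expected.getDescription());
        }
        Pattern ok = Pattern.compile("[A-Za-z\u00F8-\u02C1]+"); // ordered range compiles fine
        System.out.println(ok.matcher("m\u00F8").matches());    // true
    }
}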
public void testImportDataExportQueryLobsInExtFile() throws SQLException, IOException { doExportQueryLobsToExtFile("select * from BOOKS", fileName, null, null, "8859_1", lobsFileName); doImportDataLobsFromExtFile(null, "BOOKS_IMP", null, null, fileName, null, null , "8859_1", 0); verifyData(" * "); // perform import with column names specified in random order. doImportDataLobsFromExtFile(null, "BOOKS_IMP", "PIC, CONTENT, NAME, ID", "4, 3, 2, 1", fileName, null, null, null, 1); verifyData("PIC, CONTENT, NAME, ID"); //DERBY-2925: need to delete export files first SupportFilesSetup.deleteFile(fileName); SupportFilesSetup.deleteFile(lobsFileName); // test with non-default delimiters. doExportQueryLobsToExtFile("select * from BOOKS_IMP", fileName, ";", "%" , null, lobsFileName); doImportDataLobsFromExtFile(null, "BOOKS_IMP", null, null, fileName, ";", "%", null, 1); }
public void testImportDataExportQueryLobsInExtFile() throws SQLException, IOException { doExportQueryLobsToExtFile("select * from BOOKS", fileName, null, null, "8859_1", lobsFileName); doImportDataLobsFromExtFile(null, "BOOKS_IMP", null, null, fileName, null, null , "8859_1", 0); verifyData(" * "); // perform import with column names specified in random order. doImportDataLobsFromExtFile(null, "BOOKS_IMP", "PIC, CONTENT, NAME, ID", "4, 3, 2, 1", fileName, null, null, "8859_1", 1); verifyData("PIC, CONTENT, NAME, ID"); //DERBY-2925: need to delete export files first SupportFilesSetup.deleteFile(fileName); SupportFilesSetup.deleteFile(lobsFileName); // test with non-default delimiters. doExportQueryLobsToExtFile("select * from BOOKS_IMP", fileName, ";", "%" , null, lobsFileName); doImportDataLobsFromExtFile(null, "BOOKS_IMP", null, null, fileName, ";", "%", null, 1); }
public ValueNode putAndsOnTop() throws StandardException { NodeFactory nodeFactory = getNodeFactory(); QueryTreeNode trueNode = nodeFactory.getNode( C_NodeTypes.BOOLEAN_CONSTANT_NODE, Boolean.TRUE, getContextManager()); AndNode andNode = (AndNode) nodeFactory.getNode( C_NodeTypes.AND_NODE, this, trueNode, getContextManager()); andNode.postBindFixup(); return andNode; }
public ValueNode putAndsOnTop() throws StandardException { NodeFactory nodeFactory = getNodeFactory(); QueryTreeNode trueNode = (QueryTreeNode) nodeFactory.getNode( C_NodeTypes.BOOLEAN_CONSTANT_NODE, Boolean.TRUE, getContextManager()); AndNode andNode = (AndNode) nodeFactory.getNode( C_NodeTypes.AND_NODE, this, trueNode, getContextManager()); andNode.postBindFixup(); return andNode; }
public ValueNode preprocess(int numTables, FromList outerFromList, SubqueryList outerSubqueryList, PredicateList outerPredicateList) throws StandardException { ValueNode leftClone1; ValueNode rightOperand; /* We must 1st preprocess the component parts */ super.preprocess(numTables, outerFromList, outerSubqueryList, outerPredicateList); /* This is where we do the transformation for BETWEEN to make it optimizable. * c1 BETWEEN value1 AND value2 -> c1 >= value1 AND c1 <= value2 * This transformation is only done if the leftOperand is a ColumnReference. */ if (!(leftOperand instanceof ColumnReference)) { return this; } /* For some unknown reason we need to clone the leftOperand if it is * a ColumnReference because reusing them in Qualifiers for a scan * does not work. */ leftClone1 = leftOperand.getClone(); /* The transformed tree has to be normalized: * AND * / \ * >= AND * / \ * <= TRUE */ NodeFactory nodeFactory = getNodeFactory(); ContextManager cm = getContextManager(); QueryTreeNode trueNode = nodeFactory.getNode( C_NodeTypes.BOOLEAN_CONSTANT_NODE, Boolean.TRUE, cm); /* Create the AND <= */ BinaryComparisonOperatorNode lessEqual = (BinaryComparisonOperatorNode) nodeFactory.getNode( C_NodeTypes.BINARY_LESS_EQUALS_OPERATOR_NODE, leftClone1, rightOperandList.elementAt(1), cm); /* Set type info for the operator node */ lessEqual.bindComparisonOperator(); /* Create the AND */ AndNode newAnd = (AndNode) nodeFactory.getNode( C_NodeTypes.AND_NODE, lessEqual, trueNode, cm); newAnd.postBindFixup(); /* Create the AND >= */ BinaryComparisonOperatorNode greaterEqual = (BinaryComparisonOperatorNode) nodeFactory.getNode( C_NodeTypes.BINARY_GREATER_EQUALS_OPERATOR_NODE, leftOperand, rightOperandList.elementAt(0), cm); /* Set type info for the operator node */ greaterEqual.bindComparisonOperator(); /* Create the AND */ newAnd = (AndNode) nodeFactory.getNode( C_NodeTypes.AND_NODE, greaterEqual, newAnd, cm); newAnd.postBindFixup(); /* Tell optimizer to use the between selectivity instead of >= * <= selectivities */ lessEqual.setBetweenSelectivity(); greaterEqual.setBetweenSelectivity(); return newAnd; }
public ValueNode preprocess(int numTables, FromList outerFromList, SubqueryList outerSubqueryList, PredicateList outerPredicateList) throws StandardException { ValueNode leftClone1; ValueNode rightOperand; /* We must 1st preprocess the component parts */ super.preprocess(numTables, outerFromList, outerSubqueryList, outerPredicateList); /* This is where we do the transformation for BETWEEN to make it optimizable. * c1 BETWEEN value1 AND value2 -> c1 >= value1 AND c1 <= value2 * This transformation is only done if the leftOperand is a ColumnReference. */ if (!(leftOperand instanceof ColumnReference)) { return this; } /* For some unknown reason we need to clone the leftOperand if it is * a ColumnReference because reusing them in Qualifiers for a scan * does not work. */ leftClone1 = leftOperand.getClone(); /* The transformed tree has to be normalized: * AND * / \ * >= AND * / \ * <= TRUE */ NodeFactory nodeFactory = getNodeFactory(); ContextManager cm = getContextManager(); QueryTreeNode trueNode = (QueryTreeNode) nodeFactory.getNode( C_NodeTypes.BOOLEAN_CONSTANT_NODE, Boolean.TRUE, cm); /* Create the AND <= */ BinaryComparisonOperatorNode lessEqual = (BinaryComparisonOperatorNode) nodeFactory.getNode( C_NodeTypes.BINARY_LESS_EQUALS_OPERATOR_NODE, leftClone1, rightOperandList.elementAt(1), cm); /* Set type info for the operator node */ lessEqual.bindComparisonOperator(); /* Create the AND */ AndNode newAnd = (AndNode) nodeFactory.getNode( C_NodeTypes.AND_NODE, lessEqual, trueNode, cm); newAnd.postBindFixup(); /* Create the AND >= */ BinaryComparisonOperatorNode greaterEqual = (BinaryComparisonOperatorNode) nodeFactory.getNode( C_NodeTypes.BINARY_GREATER_EQUALS_OPERATOR_NODE, leftOperand, rightOperandList.elementAt(0), cm); /* Set type info for the operator node */ greaterEqual.bindComparisonOperator(); /* Create the AND */ newAnd = (AndNode) nodeFactory.getNode( C_NodeTypes.AND_NODE, greaterEqual, newAnd, cm); newAnd.postBindFixup(); /* Tell optimizer to use the between selectivity instead of >= * <= selectivities */ lessEqual.setBetweenSelectivity(); greaterEqual.setBetweenSelectivity(); return newAnd; }
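// Editor's note (illustrative sketch): this fix and the putAndsOnTop fix above are the
// same change -- an explicit (QueryTreeNode) cast on the value returned by
// nodeFactory.getNode(...). Presumably the factory method is declared with a broader
// return type, so assigning its result to a narrower reference needs the cast.
// Generic illustration only; the types below are hypothetical stand-ins, not Derby's.
class CastOnFactoryResultSketch {
    interface Node {}
    static final class BooleanConstantNode implements Node {}

    static Object getNode(int nodeType) {   // broad declared return type
        return new BooleanConstantNode();
    }

    static Node makeTrueNode() {
        return (Node) getNode(42);          // explicit cast required to narrow the reference
    }
}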
private int assertFiringOrder(String iud, int modifiedRowCount, boolean noAfter) { List fires = (List) TRIGGER_INFO.get(); int lastOrder = -1; String lastBefore = null; for (Iterator i = fires.iterator(); i.hasNext(); ) { String info = i.next().toString(); StringTokenizer st = new StringTokenizer(info, ","); assertEquals(4, st.countTokens()); st.hasMoreTokens(); int order = Integer.valueOf(st.nextToken()).intValue(); st.hasMoreTokens(); String before = st.nextToken(); st.hasMoreTokens(); String fiud = st.nextToken(); st.hasMoreTokens(); String row = st.nextToken(); assertEquals("Incorrect trigger firing:"+info, iud, fiud); if (modifiedRowCount == 0) assertEquals("Row trigger firing on no rows", "STATEMENT", row); if (noAfter) assertFalse("No AFTER triggers", "AFTER".equals(before)); // First trigger. if (lastOrder == -1) { lastOrder = order; lastBefore = before; continue; } // Same trigger as last one. if (lastBefore.equals(before)) { // for multiple rows the trigger can match the previous one. boolean orderOk = modifiedRowCount > 1 ? (order >= lastOrder) : (order > lastOrder); assertTrue("matching triggers need to be fired in order creation:" +info, orderOk); lastOrder = order; continue; } // switching from a before trigger to an after trigger. assertEquals("BEFORE before AFTER:"+info, "NO CASCADE BEFORE", lastBefore); assertEquals("then AFTER:"+info, "AFTER", before); lastBefore = before; lastOrder = order; } return fires.size(); }
private int assertFiringOrder(String iud, int modifiedRowCount, boolean noAfter) { List fires = (List) TRIGGER_INFO.get(); int lastOrder = -1; String lastBefore = null; for (Iterator i = fires.iterator(); i.hasNext(); ) { String info = i.next().toString(); StringTokenizer st = new StringTokenizer(info, ","); assertEquals(4, st.countTokens()); st.hasMoreTokens(); int order = Integer.valueOf(st.nextToken()).intValue(); st.hasMoreTokens(); String before = st.nextToken(); st.hasMoreTokens(); String fiud = st.nextToken(); st.hasMoreTokens(); String row = st.nextToken(); assertEquals("Incorrect trigger firing:"+info, iud, fiud); if (modifiedRowCount == 0) assertEquals("Row trigger firing on no rows", "STATEMENT", row); if (noAfter) assertFalse("No AFTER triggers", "AFTER".equals(before)); // First trigger. if (lastOrder == -1) { lastOrder = order; lastBefore = before; continue; } // Same trigger as last one. if (lastBefore.equals(before)) { // for multiple rows the trigger can match the previous one. boolean orderOk = modifiedRowCount > 1 ? (order >= lastOrder) : (order > lastOrder); assertTrue("matching triggers need to be fired in order creation:" +info+". Triggers got fired in this order:"+TRIGGER_INFO.get().toString(), orderOk); lastOrder = order; continue; } // switching from a before trigger to an after trigger. assertEquals("BEFORE before AFTER:"+info, "NO CASCADE BEFORE", lastBefore); assertEquals("then AFTER:"+info, "AFTER", before); lastBefore = before; lastOrder = order; } return fires.size(); }
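// Editor's note (illustrative sketch): the only change above is a richer assertion
// message -- on failure it now prints the whole recorded trigger-firing order, not just
// the offending entry, which makes an intermittent ordering failure diagnosable from the
// log alone. Same pattern in miniature; the helper and names below are hypothetical.
import static org.junit.Assert.assertTrue;
import java.util.List;

class DiagnosticAssertSketch {
    static void assertOrdered(List<Integer> firingOrder, int previous, int current) {
        assertTrue("triggers fired out of creation order; full order: " + firingOrder,
                   current >= previous);
    }
}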
private void processOtherUser(Object id, Collection<Item> relevantItems,
private static void processOtherUser(Object id, Collection<Item> relevantItems,
private String limitMsg;
public void newFieldWithAccessors(String getter, String setter, int methodModifers, boolean staticField, String type) { String vmType = factory.type(type).vmName(); methodModifers |= Modifier.FINAL; // add a field, field has same name as get method int fieldModifiers = Modifier.PRIVATE; if (staticField) fieldModifiers |= Modifier.STATIC; ClassMember field = classHold.addMember(getter, vmType, fieldModifiers); int cpi = classHold.addFieldReference(field); /* ** add the get method */ String sig = BCMethodDescriptor.get(BCMethodDescriptor.EMPTY, vmType, factory); ClassMember method = classHold.addMember(getter, sig, methodModifers); CodeChunk chunk = new CodeChunk(); // load 'this' if required if (!staticField) chunk.addInstr(VMOpcode.ALOAD_0); // this // get the field value chunk.addInstrU2((staticField ? VMOpcode.GETSTATIC : VMOpcode.GETFIELD), cpi); // and return it short vmTypeId = BCJava.vmTypeId(vmType); chunk.addInstr(CodeChunk.RETURN_OPCODE[vmTypeId]); int typeWidth = Type.width(vmTypeId); chunk.complete(null, classHold, method, typeWidth, 1); /* ** add the set method */ String[] pda = new String[1]; pda[0] = vmType; sig = new BCMethodDescriptor(pda, VMDescriptor.VOID, factory).toString(); method = classHold.addMember(setter, sig, methodModifers); chunk = new CodeChunk(); // load 'this' if required if (!staticField) chunk.addInstr(VMOpcode.ALOAD_0); // this // push the only parameter chunk.addInstr((short) (CodeChunk.LOAD_VARIABLE_FAST[vmTypeId] + 1)); // and set the field chunk.addInstrU2((staticField ? VMOpcode.PUTSTATIC : VMOpcode.PUTFIELD), cpi); chunk.addInstr(VMOpcode.RETURN); chunk.complete(null, classHold, method, typeWidth + (staticField ? 0 : 1), 1 + typeWidth); } /** * Add the fact that some class limit was exceeded while generating * the class. We create a set ofg them and report at the end, this * allows the generated class file to still be dumped. * @param mb * @param limitName * @param limit * @param value */ void addLimitExceeded(BCMethod mb, String limitName, int limit, int value) { StringBuffer sb = new StringBuffer(); if (limitMsg != null) { sb.append(limitMsg); sb.append(", "); } sb.append("method:"); sb.append(mb.getName()); sb.append(" "); sb.append(limitName); sb.append(" ("); sb.append(value); sb.append(" > "); sb.append(limit); sb.append(")"); limitMsg = sb.toString(); } }
public Collection<ColumnFamily> getColumnFamilies() { return modifications_.values(); } void addHints(RowMutation rm) throws IOException { for (ColumnFamily cf : rm.getColumnFamilies()) { byte[] combined = HintedHandOffManager.makeCombinedName(rm.getTable(), cf.metadata().cfName); QueryPath path = new QueryPath(HintedHandOffManager.HINTS_CF, rm.key(), combined); add(path, ArrayUtils.EMPTY_BYTE_ARRAY, new TimestampClock(System.currentTimeMillis())); } }
public Collection<ColumnFamily> getColumnFamilies() { return modifications_.values(); } void addHints(RowMutation rm) throws IOException { for (ColumnFamily cf : rm.getColumnFamilies()) { byte[] combined = HintedHandOffManager.makeCombinedName(rm.getTable(), cf.metadata().cfName); QueryPath path = new QueryPath(HintedHandOffManager.HINTS_CF, rm.key(), combined); add(path, ArrayUtils.EMPTY_BYTE_ARRAY, new TimestampClock(System.currentTimeMillis()), DatabaseDescriptor.getGcGraceInSeconds()); } }
protected static int mapJdbcTypeToDrdaType(int jdbcType, boolean nullable, AppRequester appRequester, int[] outlen) throws SQLException { int drdaType = 0; switch (jdbcType) { case Types.BOOLEAN: case java.sql.Types.BIT: case java.sql.Types.TINYINT: case java.sql.Types.SMALLINT: drdaType = DRDAConstants.DRDA_TYPE_NSMALL; outlen[0] = 2; break; case java.sql.Types.INTEGER: drdaType = DRDAConstants.DRDA_TYPE_NINTEGER; outlen[0] = 4; break; case java.sql.Types.BIGINT: drdaType = DRDAConstants.DRDA_TYPE_NINTEGER8; outlen[0] = 8; break; case java.sql.Types.REAL: drdaType = DRDAConstants.DRDA_TYPE_NFLOAT4; outlen[0] = 4; break; case java.sql.Types.DOUBLE: case java.sql.Types.FLOAT: drdaType = DRDAConstants.DRDA_TYPE_NFLOAT8; outlen[0] = 8; break; case java.sql.Types.NUMERIC: case java.sql.Types.DECIMAL: drdaType = DRDAConstants.DRDA_TYPE_NDECIMAL; //needs to be adjusted for actual value outlen[0] = -1; break; case java.sql.Types.DATE: drdaType = DRDAConstants.DRDA_TYPE_NDATE; outlen[0] = 10; break; case java.sql.Types.TIME: drdaType = DRDAConstants.DRDA_TYPE_NTIME; outlen[0] = 8; break; case java.sql.Types.TIMESTAMP: drdaType = DRDAConstants.DRDA_TYPE_NTIMESTAMP; outlen[0] = 26; break; case java.sql.Types.CHAR: // drdaType = DRDAConstants.DRDA_TYPE_NCHAR; //making this NVARMIX for now to handle different byte length //characters - checking with Paul to see if this is the //correct way to handle it. drdaType = DRDAConstants.DRDA_TYPE_NVARMIX; outlen[0] = -1; break; case java.sql.Types.VARCHAR: drdaType = DRDAConstants.DRDA_TYPE_NVARCHAR; outlen[0] = -1; break; // we will just convert a java object to a string // since jcc doesn't support it. case java.sql.Types.JAVA_OBJECT: //boolean b = false; //if ( b ) if ( appRequester.supportsUDTs() ) { drdaType = DRDAConstants.DRDA_TYPE_NUDT; outlen[0] = -1; } else { drdaType = DRDAConstants.DRDA_TYPE_NLONG; outlen[0] = LONGVARCHAR_MAX_LEN; } break; case java.sql.Types.LONGVARCHAR: drdaType = DRDAConstants.DRDA_TYPE_NLONG; outlen[0] = LONGVARCHAR_MAX_LEN; break; case java.sql.Types.BINARY: case java.sql.Types.VARBINARY: drdaType = DRDAConstants.DRDA_TYPE_NVARBYTE; outlen[0] = -1; break; case java.sql.Types.LONGVARBINARY: drdaType = DRDAConstants.DRDA_TYPE_NLONGVARBYTE; outlen[0] = LONGVARBINARY_MAX_LEN; break; // blob begin // merge BLOB and BLOB_LOCATOR ???? case java.sql.Types.BLOB: drdaType = DRDAConstants.DRDA_TYPE_NLOBBYTES; // indicates fdocadata is a place holder with 4 byte length outlen[0] = 0x8004; break; case java.sql.Types.CLOB: drdaType = DRDAConstants.DRDA_TYPE_NLOBCMIXED; outlen[0] = 0x8004; break; // blob end case java.sql.Types.ARRAY: case java.sql.Types.DISTINCT: case java.sql.Types.NULL: case java.sql.Types.OTHER: case java.sql.Types.REF: case java.sql.Types.STRUCT: throw new SQLException("Jdbc type" + jdbcType + "not Supported yet"); default: throw new SQLException ("unrecognized sql type: " + jdbcType); } if (!nullable) drdaType--; return drdaType; }
protected static int mapJdbcTypeToDrdaType(int jdbcType, boolean nullable, AppRequester appRequester, int[] outlen) throws SQLException { int drdaType = 0; switch (jdbcType) { case Types.BOOLEAN: case java.sql.Types.BIT: case java.sql.Types.TINYINT: case java.sql.Types.SMALLINT: drdaType = DRDAConstants.DRDA_TYPE_NSMALL; outlen[0] = 2; break; case java.sql.Types.INTEGER: drdaType = DRDAConstants.DRDA_TYPE_NINTEGER; outlen[0] = 4; break; case java.sql.Types.BIGINT: drdaType = DRDAConstants.DRDA_TYPE_NINTEGER8; outlen[0] = 8; break; case java.sql.Types.REAL: drdaType = DRDAConstants.DRDA_TYPE_NFLOAT4; outlen[0] = 4; break; case java.sql.Types.DOUBLE: case java.sql.Types.FLOAT: drdaType = DRDAConstants.DRDA_TYPE_NFLOAT8; outlen[0] = 8; break; case java.sql.Types.NUMERIC: case java.sql.Types.DECIMAL: drdaType = DRDAConstants.DRDA_TYPE_NDECIMAL; //needs to be adjusted for actual value outlen[0] = -1; break; case java.sql.Types.DATE: drdaType = DRDAConstants.DRDA_TYPE_NDATE; outlen[0] = 10; break; case java.sql.Types.TIME: drdaType = DRDAConstants.DRDA_TYPE_NTIME; outlen[0] = 8; break; case java.sql.Types.TIMESTAMP: drdaType = DRDAConstants.DRDA_TYPE_NTIMESTAMP; outlen[0] = appRequester.getTimestampLength(); break; case java.sql.Types.CHAR: // drdaType = DRDAConstants.DRDA_TYPE_NCHAR; //making this NVARMIX for now to handle different byte length //characters - checking with Paul to see if this is the //correct way to handle it. drdaType = DRDAConstants.DRDA_TYPE_NVARMIX; outlen[0] = -1; break; case java.sql.Types.VARCHAR: drdaType = DRDAConstants.DRDA_TYPE_NVARCHAR; outlen[0] = -1; break; // we will just convert a java object to a string // since jcc doesn't support it. case java.sql.Types.JAVA_OBJECT: //boolean b = false; //if ( b ) if ( appRequester.supportsUDTs() ) { drdaType = DRDAConstants.DRDA_TYPE_NUDT; outlen[0] = -1; } else { drdaType = DRDAConstants.DRDA_TYPE_NLONG; outlen[0] = LONGVARCHAR_MAX_LEN; } break; case java.sql.Types.LONGVARCHAR: drdaType = DRDAConstants.DRDA_TYPE_NLONG; outlen[0] = LONGVARCHAR_MAX_LEN; break; case java.sql.Types.BINARY: case java.sql.Types.VARBINARY: drdaType = DRDAConstants.DRDA_TYPE_NVARBYTE; outlen[0] = -1; break; case java.sql.Types.LONGVARBINARY: drdaType = DRDAConstants.DRDA_TYPE_NLONGVARBYTE; outlen[0] = LONGVARBINARY_MAX_LEN; break; // blob begin // merge BLOB and BLOB_LOCATOR ???? case java.sql.Types.BLOB: drdaType = DRDAConstants.DRDA_TYPE_NLOBBYTES; // indicates fdocadata is a place holder with 4 byte length outlen[0] = 0x8004; break; case java.sql.Types.CLOB: drdaType = DRDAConstants.DRDA_TYPE_NLOBCMIXED; outlen[0] = 0x8004; break; // blob end case java.sql.Types.ARRAY: case java.sql.Types.DISTINCT: case java.sql.Types.NULL: case java.sql.Types.OTHER: case java.sql.Types.REF: case java.sql.Types.STRUCT: throw new SQLException("Jdbc type" + jdbcType + "not Supported yet"); default: throw new SQLException ("unrecognized sql type: " + jdbcType); } if (!nullable) drdaType--; return drdaType; }
private void describeKeySpace(String keySpaceName, KsDef metadata) throws TException { NodeProbe probe = sessionState.getNodeProbe(); // getting compaction manager MBean to displaying index building information CompactionManagerMBean compactionManagerMBean = (probe == null) ? null : probe.getCompactionManagerProxy(); // Describe and display sessionState.out.println("Keyspace: " + keySpaceName + ":"); try { KsDef ks_def; ks_def = metadata == null ? thriftClient.describe_keyspace(keySpaceName) : metadata; sessionState.out.println(" Replication Strategy: " + ks_def.strategy_class); sessionState.out.println(" Durable Writes: " + ks_def.durable_writes); Map<String, String> options = ks_def.strategy_options; sessionState.out.println(" Options: [" + ((options == null) ? "" : FBUtilities.toString(options)) + "]"); sessionState.out.println(" Column Families:"); boolean isSuper; Collections.sort(ks_def.cf_defs, new CfDefNamesComparator()); for (CfDef cf_def : ks_def.cf_defs) { // fetching bean for current column family store ColumnFamilyStoreMBean cfMBean = (probe == null) ? null : probe.getCfsProxy(ks_def.getName(), cf_def.getName()); isSuper = cf_def.column_type.equals("Super"); sessionState.out.printf(" ColumnFamily: %s%s%n", cf_def.name, isSuper ? " (Super)" : ""); if (cf_def.comment != null && !cf_def.comment.isEmpty()) { sessionState.out.printf(" \"%s\"%n", cf_def.comment); } if (cf_def.key_validation_class != null) sessionState.out.printf(" Key Validation Class: %s%n", cf_def.key_validation_class); if (cf_def.default_validation_class != null) sessionState.out.printf(" Default column value validator: %s%n", cf_def.default_validation_class); sessionState.out.printf(" Columns sorted by: %s%s%n", cf_def.comparator_type, cf_def.column_type.equals("Super") ? "/" + cf_def.subcomparator_type : ""); sessionState.out.printf(" Row cache size / save period in seconds: %s/%s%n", cf_def.row_cache_size, cf_def.row_cache_save_period_in_seconds); sessionState.out.printf(" Key cache size / save period in seconds: %s/%s%n", cf_def.key_cache_size, cf_def.key_cache_save_period_in_seconds); sessionState.out.printf(" Memtable thresholds: %s/%s/%s (millions of ops/MB/minutes)%n", cf_def.memtable_operations_in_millions, cf_def.memtable_flush_after_mins, cf_def.memtable_throughput_in_mb); sessionState.out.printf(" GC grace seconds: %s%n", cf_def.gc_grace_seconds); sessionState.out.printf(" Compaction min/max thresholds: %s/%s%n", cf_def.min_compaction_threshold, cf_def.max_compaction_threshold); sessionState.out.printf(" Read repair chance: %s%n", cf_def.read_repair_chance); sessionState.out.printf(" Replicate on write: %s%n", cf_def.replicate_on_write); // if we have connection to the cfMBean established if (cfMBean != null) { sessionState.out.printf(" Built indexes: %s%n", cfMBean.getBuiltIndexes()); } if (cf_def.getColumn_metadataSize() != 0) { String leftSpace = " "; String columnLeftSpace = leftSpace + " "; String compareWith = isSuper ? 
cf_def.subcomparator_type : cf_def.comparator_type; AbstractType columnNameValidator = getFormatType(compareWith); sessionState.out.println(leftSpace + "Column Metadata:"); for (ColumnDef columnDef : cf_def.getColumn_metadata()) { String columnName = columnNameValidator.getString(columnDef.name); if (columnNameValidator instanceof BytesType) { try { String columnString = UTF8Type.instance.getString(columnDef.name); columnName = columnString + " (" + columnName + ")"; } catch (MarshalException e) { // guess it wasn't a utf8 column name after all } } sessionState.out.println(leftSpace + " Column Name: " + columnName); sessionState.out.println(columnLeftSpace + "Validation Class: " + columnDef.getValidation_class()); if (columnDef.isSetIndex_name()) { sessionState.out.println(columnLeftSpace + "Index Name: " + columnDef.getIndex_name()); } if (columnDef.isSetIndex_type()) { sessionState.out.println(columnLeftSpace + "Index Type: " + columnDef.getIndex_type().name()); } } } } // compaction manager information if (compactionManagerMBean != null) { for (CompactionInfo info : compactionManagerMBean.getCompactions()) { // if ongoing compaction type is index build if (info.getTaskType() != CompactionType.INDEX_BUILD) continue; sessionState.out.printf("%nCurrently building index %s, completed %d of %d bytes.%n", info.getColumnFamily(), info.getBytesComplete(), info.getTotalBytes()); } } // closing JMX connection if (probe != null) probe.close(); } catch (InvalidRequestException e) { sessionState.out.println("Invalid request: " + e); } catch (NotFoundException e) { sessionState.out.println("Keyspace " + keySpaceName + " could not be found."); } catch (IOException e) { sessionState.out.println("Error while closing JMX connection: " + e.getMessage()); } }
private void describeKeySpace(String keySpaceName, KsDef metadata) throws TException { NodeProbe probe = sessionState.getNodeProbe(); // getting compaction manager MBean to displaying index building information CompactionManagerMBean compactionManagerMBean = (probe == null) ? null : probe.getCompactionManagerProxy(); // Describe and display sessionState.out.println("Keyspace: " + keySpaceName + ":"); try { KsDef ks_def; ks_def = metadata == null ? thriftClient.describe_keyspace(keySpaceName) : metadata; sessionState.out.println(" Replication Strategy: " + ks_def.strategy_class); sessionState.out.println(" Durable Writes: " + ks_def.durable_writes); Map<String, String> options = ks_def.strategy_options; sessionState.out.println(" Options: [" + ((options == null) ? "" : FBUtilities.toString(options)) + "]"); sessionState.out.println(" Column Families:"); boolean isSuper; Collections.sort(ks_def.cf_defs, new CfDefNamesComparator()); for (CfDef cf_def : ks_def.cf_defs) { // fetching bean for current column family store ColumnFamilyStoreMBean cfMBean = (probe == null) ? null : probe.getCfsProxy(ks_def.getName(), cf_def.getName()); isSuper = cf_def.column_type.equals("Super"); sessionState.out.printf(" ColumnFamily: %s%s%n", cf_def.name, isSuper ? " (Super)" : ""); if (cf_def.comment != null && !cf_def.comment.isEmpty()) { sessionState.out.printf(" \"%s\"%n", cf_def.comment); } if (cf_def.key_validation_class != null) sessionState.out.printf(" Key Validation Class: %s%n", cf_def.key_validation_class); if (cf_def.default_validation_class != null) sessionState.out.printf(" Default column value validator: %s%n", cf_def.default_validation_class); sessionState.out.printf(" Columns sorted by: %s%s%n", cf_def.comparator_type, cf_def.column_type.equals("Super") ? "/" + cf_def.subcomparator_type : ""); sessionState.out.printf(" Row cache size / save period in seconds: %s/%s%n", cf_def.row_cache_size, cf_def.row_cache_save_period_in_seconds); sessionState.out.printf(" Key cache size / save period in seconds: %s/%s%n", cf_def.key_cache_size, cf_def.key_cache_save_period_in_seconds); sessionState.out.printf(" Memtable thresholds: %s/%s/%s (millions of ops/minutes/MB)%n", cf_def.memtable_operations_in_millions, cf_def.memtable_flush_after_mins, cf_def.memtable_throughput_in_mb); sessionState.out.printf(" GC grace seconds: %s%n", cf_def.gc_grace_seconds); sessionState.out.printf(" Compaction min/max thresholds: %s/%s%n", cf_def.min_compaction_threshold, cf_def.max_compaction_threshold); sessionState.out.printf(" Read repair chance: %s%n", cf_def.read_repair_chance); sessionState.out.printf(" Replicate on write: %s%n", cf_def.replicate_on_write); // if we have connection to the cfMBean established if (cfMBean != null) { sessionState.out.printf(" Built indexes: %s%n", cfMBean.getBuiltIndexes()); } if (cf_def.getColumn_metadataSize() != 0) { String leftSpace = " "; String columnLeftSpace = leftSpace + " "; String compareWith = isSuper ? 
cf_def.subcomparator_type : cf_def.comparator_type; AbstractType columnNameValidator = getFormatType(compareWith); sessionState.out.println(leftSpace + "Column Metadata:"); for (ColumnDef columnDef : cf_def.getColumn_metadata()) { String columnName = columnNameValidator.getString(columnDef.name); if (columnNameValidator instanceof BytesType) { try { String columnString = UTF8Type.instance.getString(columnDef.name); columnName = columnString + " (" + columnName + ")"; } catch (MarshalException e) { // guess it wasn't a utf8 column name after all } } sessionState.out.println(leftSpace + " Column Name: " + columnName); sessionState.out.println(columnLeftSpace + "Validation Class: " + columnDef.getValidation_class()); if (columnDef.isSetIndex_name()) { sessionState.out.println(columnLeftSpace + "Index Name: " + columnDef.getIndex_name()); } if (columnDef.isSetIndex_type()) { sessionState.out.println(columnLeftSpace + "Index Type: " + columnDef.getIndex_type().name()); } } } } // compaction manager information if (compactionManagerMBean != null) { for (CompactionInfo info : compactionManagerMBean.getCompactions()) { // if ongoing compaction type is index build if (info.getTaskType() != CompactionType.INDEX_BUILD) continue; sessionState.out.printf("%nCurrently building index %s, completed %d of %d bytes.%n", info.getColumnFamily(), info.getBytesComplete(), info.getTotalBytes()); } } // closing JMX connection if (probe != null) probe.close(); } catch (InvalidRequestException e) { sessionState.out.println("Invalid request: " + e); } catch (NotFoundException e) { sessionState.out.println("Keyspace " + keySpaceName + " could not be found."); } catch (IOException e) { sessionState.out.println("Error while closing JMX connection: " + e.getMessage()); } }
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException { if( abortErrorMessage != null ) { ((HttpServletResponse)response).sendError( 500, abortErrorMessage ); return; } if (this.cores == null) { ((HttpServletResponse)response).sendError( 403, "Server is shutting down" ); return; } CoreContainer cores = this.cores; SolrCore core = null; SolrQueryRequest solrReq = null; if( request instanceof HttpServletRequest) { HttpServletRequest req = (HttpServletRequest)request; HttpServletResponse resp = (HttpServletResponse)response; SolrRequestHandler handler = null; String corename = ""; try { // put the core container in request attribute req.setAttribute("org.apache.solr.CoreContainer", cores); String path = req.getServletPath(); if( req.getPathInfo() != null ) { // this lets you handle /update/commit when /update is a servlet path += req.getPathInfo(); } if( pathPrefix != null && path.startsWith( pathPrefix ) ) { path = path.substring( pathPrefix.length() ); } // check for management path String alternate = cores.getManagementPath(); if (alternate != null && path.startsWith(alternate)) { path = path.substring(0, alternate.length()); } // unused feature ? int idx = path.indexOf( ':' ); if( idx > 0 ) { // save the portion after the ':' for a 'handler' path parameter path = path.substring( 0, idx ); } // Check for the core admin page if( path.equals( cores.getAdminPath() ) ) { handler = cores.getMultiCoreHandler(); solrReq = adminRequestParser.parse(null,path, req); handleAdminRequest(req, response, handler, solrReq); return; } // Check for the core admin collections url if( path.equals( "/admin/collections" ) ) { handler = cores.getCollectionsHandler(); solrReq = adminRequestParser.parse(null,path, req); handleAdminRequest(req, response, handler, solrReq); return; } else { //otherwise, we should find a core from the path idx = path.indexOf( "/", 1 ); if( idx > 1 ) { // try to get the corename as a request parameter first corename = path.substring( 1, idx ); core = cores.getCore(corename); if (core != null) { path = path.substring( idx ); } } if (core == null) { if (!cores.isZooKeeperAware() ) { core = cores.getCore(""); } } } if (core == null && cores.isZooKeeperAware()) { // we couldn't find the core - lets make sure a collection was not specified instead core = getCoreByCollection(cores, corename, path); if (core != null) { // we found a core, update the path path = path.substring( idx ); } else { // try the default core core = cores.getCore(""); } // TODO: if we couldn't find it locally, look on other nodes } // With a valid core... 
if( core != null ) { final SolrConfig config = core.getSolrConfig(); // get or create/cache the parser for the core SolrRequestParsers parser = null; parser = parsers.get(config); if( parser == null ) { parser = new SolrRequestParsers(config); parsers.put(config, parser ); } // Determine the handler from the url path if not set // (we might already have selected the cores handler) if( handler == null && path.length() > 1 ) { // don't match "" or "/" as valid path handler = core.getRequestHandler( path ); // no handler yet but allowed to handle select; let's check if( handler == null && parser.isHandleSelect() ) { if( "/select".equals( path ) || "/select/".equals( path ) ) { solrReq = parser.parse( core, path, req ); String qt = solrReq.getParams().get( CommonParams.QT ); handler = core.getRequestHandler( qt ); if( handler == null ) { throw new SolrException( SolrException.ErrorCode.BAD_REQUEST, "unknown handler: "+qt); } if( qt != null && qt.startsWith("/") && (handler instanceof ContentStreamHandlerBase)) { //For security reasons it's a bad idea to allow a leading '/', ex: /select?qt=/update see SOLR-3161 //There was no restriction from Solr 1.4 thru 3.5 and it's not supported for update handlers. throw new SolrException( SolrException.ErrorCode.BAD_REQUEST, "Invalid Request Handler ('qt'). Do not use /select to access: "+qt); } } } } // With a valid handler and a valid core... if( handler != null ) { // if not a /select, create the request if( solrReq == null ) { solrReq = parser.parse( core, path, req ); } final Method reqMethod = Method.getMethod(req.getMethod()); HttpCacheHeaderUtil.setCacheControlHeader(config, resp, reqMethod); // unless we have been explicitly told not to, do cache validation // if we fail cache validation, execute the query if (config.getHttpCachingConfig().isNever304() || !HttpCacheHeaderUtil.doCacheHeaderValidation(solrReq, req, reqMethod, resp)) { SolrQueryResponse solrRsp = new SolrQueryResponse(); /* even for HEAD requests, we need to execute the handler to * ensure we don't get an error (and to make sure the correct * QueryResponseWriter is selected and we get the correct * Content-Type) */ SolrRequestInfo.setRequestInfo(new SolrRequestInfo(solrReq, solrRsp)); this.execute( req, handler, solrReq, solrRsp ); HttpCacheHeaderUtil.checkHttpCachingVeto(solrRsp, resp, reqMethod); // add info to http headers //TODO: See SOLR-232 and SOLR-267. /*try { NamedList solrRspHeader = solrRsp.getResponseHeader(); for (int i=0; i<solrRspHeader.size(); i++) { ((javax.servlet.http.HttpServletResponse) response).addHeader(("Solr-" + solrRspHeader.getName(i)), String.valueOf(solrRspHeader.getVal(i))); } } catch (ClassCastException cce) { log.log(Level.WARNING, "exception adding response header log information", cce); }*/ QueryResponseWriter responseWriter = core.getQueryResponseWriter(solrReq); writeResponse(solrRsp, response, responseWriter, solrReq, reqMethod); } return; // we are done with a valid handler } } log.debug("no handler or core retrieved for " + path + ", follow through..."); } catch (Throwable ex) { sendError( core, solrReq, request, (HttpServletResponse)response, ex ); return; } finally { if( solrReq != null ) { solrReq.close(); } if (core != null) { core.close(); } SolrRequestInfo.clearRequestInfo(); } } // Otherwise let the webapp handle the request chain.doFilter(request, response); }
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException { if( abortErrorMessage != null ) { ((HttpServletResponse)response).sendError( 500, abortErrorMessage ); return; } if (this.cores == null) { ((HttpServletResponse)response).sendError( 503, "Server is shutting down" ); return; } CoreContainer cores = this.cores; SolrCore core = null; SolrQueryRequest solrReq = null; if( request instanceof HttpServletRequest) { HttpServletRequest req = (HttpServletRequest)request; HttpServletResponse resp = (HttpServletResponse)response; SolrRequestHandler handler = null; String corename = ""; try { // put the core container in request attribute req.setAttribute("org.apache.solr.CoreContainer", cores); String path = req.getServletPath(); if( req.getPathInfo() != null ) { // this lets you handle /update/commit when /update is a servlet path += req.getPathInfo(); } if( pathPrefix != null && path.startsWith( pathPrefix ) ) { path = path.substring( pathPrefix.length() ); } // check for management path String alternate = cores.getManagementPath(); if (alternate != null && path.startsWith(alternate)) { path = path.substring(0, alternate.length()); } // unused feature ? int idx = path.indexOf( ':' ); if( idx > 0 ) { // save the portion after the ':' for a 'handler' path parameter path = path.substring( 0, idx ); } // Check for the core admin page if( path.equals( cores.getAdminPath() ) ) { handler = cores.getMultiCoreHandler(); solrReq = adminRequestParser.parse(null,path, req); handleAdminRequest(req, response, handler, solrReq); return; } // Check for the core admin collections url if( path.equals( "/admin/collections" ) ) { handler = cores.getCollectionsHandler(); solrReq = adminRequestParser.parse(null,path, req); handleAdminRequest(req, response, handler, solrReq); return; } else { //otherwise, we should find a core from the path idx = path.indexOf( "/", 1 ); if( idx > 1 ) { // try to get the corename as a request parameter first corename = path.substring( 1, idx ); core = cores.getCore(corename); if (core != null) { path = path.substring( idx ); } } if (core == null) { if (!cores.isZooKeeperAware() ) { core = cores.getCore(""); } } } if (core == null && cores.isZooKeeperAware()) { // we couldn't find the core - lets make sure a collection was not specified instead core = getCoreByCollection(cores, corename, path); if (core != null) { // we found a core, update the path path = path.substring( idx ); } else { // try the default core core = cores.getCore(""); } // TODO: if we couldn't find it locally, look on other nodes } // With a valid core... 
if( core != null ) { final SolrConfig config = core.getSolrConfig(); // get or create/cache the parser for the core SolrRequestParsers parser = null; parser = parsers.get(config); if( parser == null ) { parser = new SolrRequestParsers(config); parsers.put(config, parser ); } // Determine the handler from the url path if not set // (we might already have selected the cores handler) if( handler == null && path.length() > 1 ) { // don't match "" or "/" as valid path handler = core.getRequestHandler( path ); // no handler yet but allowed to handle select; let's check if( handler == null && parser.isHandleSelect() ) { if( "/select".equals( path ) || "/select/".equals( path ) ) { solrReq = parser.parse( core, path, req ); String qt = solrReq.getParams().get( CommonParams.QT ); handler = core.getRequestHandler( qt ); if( handler == null ) { throw new SolrException( SolrException.ErrorCode.BAD_REQUEST, "unknown handler: "+qt); } if( qt != null && qt.startsWith("/") && (handler instanceof ContentStreamHandlerBase)) { //For security reasons it's a bad idea to allow a leading '/', ex: /select?qt=/update see SOLR-3161 //There was no restriction from Solr 1.4 thru 3.5 and it's not supported for update handlers. throw new SolrException( SolrException.ErrorCode.BAD_REQUEST, "Invalid Request Handler ('qt'). Do not use /select to access: "+qt); } } } } // With a valid handler and a valid core... if( handler != null ) { // if not a /select, create the request if( solrReq == null ) { solrReq = parser.parse( core, path, req ); } final Method reqMethod = Method.getMethod(req.getMethod()); HttpCacheHeaderUtil.setCacheControlHeader(config, resp, reqMethod); // unless we have been explicitly told not to, do cache validation // if we fail cache validation, execute the query if (config.getHttpCachingConfig().isNever304() || !HttpCacheHeaderUtil.doCacheHeaderValidation(solrReq, req, reqMethod, resp)) { SolrQueryResponse solrRsp = new SolrQueryResponse(); /* even for HEAD requests, we need to execute the handler to * ensure we don't get an error (and to make sure the correct * QueryResponseWriter is selected and we get the correct * Content-Type) */ SolrRequestInfo.setRequestInfo(new SolrRequestInfo(solrReq, solrRsp)); this.execute( req, handler, solrReq, solrRsp ); HttpCacheHeaderUtil.checkHttpCachingVeto(solrRsp, resp, reqMethod); // add info to http headers //TODO: See SOLR-232 and SOLR-267. /*try { NamedList solrRspHeader = solrRsp.getResponseHeader(); for (int i=0; i<solrRspHeader.size(); i++) { ((javax.servlet.http.HttpServletResponse) response).addHeader(("Solr-" + solrRspHeader.getName(i)), String.valueOf(solrRspHeader.getVal(i))); } } catch (ClassCastException cce) { log.log(Level.WARNING, "exception adding response header log information", cce); }*/ QueryResponseWriter responseWriter = core.getQueryResponseWriter(solrReq); writeResponse(solrRsp, response, responseWriter, solrReq, reqMethod); } return; // we are done with a valid handler } } log.debug("no handler or core retrieved for " + path + ", follow through..."); } catch (Throwable ex) { sendError( core, solrReq, request, (HttpServletResponse)response, ex ); return; } finally { if( solrReq != null ) { solrReq.close(); } if (core != null) { core.close(); } SolrRequestInfo.clearRequestInfo(); } } // Otherwise let the webapp handle the request chain.doFilter(request, response); }
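// Editor's note (illustrative sketch): the fix above swaps the hard-coded 403 for 503 --
// "Forbidden" blames the client, while "Service Unavailable" correctly describes a node
// that is shutting down and may come back. Using the servlet-api constant makes the
// intent explicit; the wrapper method below is hypothetical.
import java.io.IOException;
import javax.servlet.http.HttpServletResponse;

class ShutdownResponseSketch {
    static void rejectWhileShuttingDown(HttpServletResponse resp) throws IOException {
        resp.sendError(HttpServletResponse.SC_SERVICE_UNAVAILABLE, "Server is shutting down");
    }
}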
private String getHostAddress(String host) throws IOException { if (host == null) { String hostaddress; try { hostaddress = InetAddress.getLocalHost().getHostAddress(); } catch (UnknownHostException e) { hostaddress = "127.0.0.1"; // cannot resolve system hostname, fall through } // Re-get the IP again for "127.0.0.1", the other case we trust the hosts // file is right. if ("127.0.0.1".equals(hostaddress)) { Enumeration<NetworkInterface> netInterfaces = null; try { netInterfaces = NetworkInterface.getNetworkInterfaces(); while (netInterfaces.hasMoreElements()) { NetworkInterface ni = netInterfaces.nextElement(); Enumeration<InetAddress> ips = ni.getInetAddresses(); while (ips.hasMoreElements()) { InetAddress ip = ips.nextElement(); if (ip.isSiteLocalAddress()) { hostaddress = ip.getHostAddress(); } } } } catch (Throwable e) { SolrException.log(log, "Error while looking for a better host name than 127.0.0.1", e); } } host = "http://" + hostaddress; } else { Matcher m = URL_PREFIX.matcher(host); if (m.matches()) { String prefix = m.group(1); host = prefix + host; } else { host = "http://" + host; } } return host; }
private String getHostAddress(String host) throws IOException { if (host == null || host.length() == 0) { String hostaddress; try { hostaddress = InetAddress.getLocalHost().getHostAddress(); } catch (UnknownHostException e) { hostaddress = "127.0.0.1"; // cannot resolve system hostname, fall through } // Re-get the IP again for "127.0.0.1", the other case we trust the hosts // file is right. if ("127.0.0.1".equals(hostaddress)) { Enumeration<NetworkInterface> netInterfaces = null; try { netInterfaces = NetworkInterface.getNetworkInterfaces(); while (netInterfaces.hasMoreElements()) { NetworkInterface ni = netInterfaces.nextElement(); Enumeration<InetAddress> ips = ni.getInetAddresses(); while (ips.hasMoreElements()) { InetAddress ip = ips.nextElement(); if (ip.isSiteLocalAddress()) { hostaddress = ip.getHostAddress(); } } } } catch (Throwable e) { SolrException.log(log, "Error while looking for a better host name than 127.0.0.1", e); } } host = "http://" + hostaddress; } else { Matcher m = URL_PREFIX.matcher(host); if (m.matches()) { String prefix = m.group(1); host = prefix + host; } else { host = "http://" + host; } } return host; }
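Both versions of getHostAddress above fall back to a site-local interface address when the JVM reports 127.0.0.1 for the local host; the fixed version additionally treats an empty host string as unset. A self-contained sketch of that fallback (an illustration, not Solr code):

import java.net.InetAddress;
import java.net.NetworkInterface;
import java.util.Enumeration;

final class SiteLocalAddressSketch {
    /** Best-effort local address: prefer a site-local interface IP over 127.0.0.1. */
    static String localAddress() {
        String address;
        try {
            address = InetAddress.getLocalHost().getHostAddress();
        } catch (Exception e) {
            address = "127.0.0.1"; // hostname not resolvable; fall through
        }
        if ("127.0.0.1".equals(address)) {
            try {
                Enumeration<NetworkInterface> nics = NetworkInterface.getNetworkInterfaces();
                while (nics.hasMoreElements()) {
                    Enumeration<InetAddress> ips = nics.nextElement().getInetAddresses();
                    while (ips.hasMoreElements()) {
                        InetAddress ip = ips.nextElement();
                        if (ip.isSiteLocalAddress()) {
                            address = ip.getHostAddress(); // e.g. a 192.168.x.x address
                        }
                    }
                }
            } catch (Exception ignored) {
                // keep 127.0.0.1 if interface enumeration fails
            }
        }
        return address;
    }

    public static void main(String[] args) {
        System.out.println("http://" + localAddress());
    }
}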
private Fields fields;

  /** @lucene.internal */
  void storeFields(Fields fields) {
    this.fields = fields;
  }

  /** @lucene.internal */
  Fields retrieveFields() {
    return fields;
  }
}
private volatile Fields fields;

  /** @lucene.internal */
  void storeFields(Fields fields) {
    this.fields = fields;
  }

  /** @lucene.internal */
  Fields retrieveFields() {
    return fields;
  }
}
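The only change in this pair is marking the cached Fields reference volatile, so a value stored by one thread is safely visible to readers on other threads. A minimal illustration of that publication pattern (a generic holder, not the Lucene class):

// Sketch: a volatile field guarantees that a reader thread sees the reference
// published by another thread (safe publication), rather than a stale value.
final class VolatileHolder<T> {
    private volatile T value;             // without volatile, a reader may keep seeing null

    void store(T v) { value = v; }        // writer thread
    T retrieve()    { return value; }     // reader thread
}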
public void testBuiltInFunctions() throws SQLException{ st = createStatement(); conn=getConnection(); conn.setAutoCommit(false); // built-in functions in a check constraint st.executeUpdate( "create table charTab (c1 char(4) check(CHAR(c1) = c1))"); st.executeUpdate( " insert into charTab values 'asdf'"); st.executeUpdate( " insert into charTab values 'fdsa'"); // beetle 5805 - support built-in function INT should fail // until beetle 5805 is implemented st.executeUpdate( "create table intTab (c1 int check(INT(1) = c1))"); st.executeUpdate( " insert into intTab values 1"); // this insert should fail, does not satisfy check constraint assertStatementError("23513", st, "insert into intTab values 2"); st.executeUpdate( " create table maxIntTab (c1 int check(INT(2147483647) > c1))"); st.executeUpdate( " insert into maxIntTab values 1"); // this insert should fail, does not satisfy check constraint assertStatementError("23513", st, "insert into maxIntTab values 2147483647"); conn.rollback(); // verify that inserts, updates and statements with forced // constraints are indeed dependent on the constraints st.executeUpdate( "create table t1(c1 int not null constraint asdf primary key)"); st.executeUpdate( " insert into t1 values 1, 2, 3, 4, 5"); conn.commit(); PreparedStatement pSt1 = prepareStatement( "insert into t1 values 1"); PreparedStatement pSt2 = prepareStatement( "update t1 set c1 = 3 where c1 = 4"); PreparedStatement pSt3 = prepareStatement( "select * from t1"); // the insert and update should fail, select should succeed assertStatementError("23505", pSt1); assertStatementError("23505", pSt2); rs = pSt3.executeQuery(); expColNames = new String [] {"C1"}; JDBC.assertColumnNames(rs, expColNames); expRS = new String [][] { {"1"}, {"2"}, {"3"}, {"4"}, {"5"} }; JDBC.assertFullResultSet(rs, expRS, true); st.executeUpdate( " alter table t1 drop constraint asdf"); // rollback and verify that constraints are enforced and // select succeeds conn.rollback(); assertStatementError("23505", pSt1); assertStatementError("23505", pSt2); rs = pSt3.executeQuery(); expColNames = new String [] {"C1"}; JDBC.assertColumnNames(rs, expColNames); expRS = new String [][] { {"1"}, {"2"}, {"3"}, {"4"}, {"5"} }; JDBC.assertFullResultSet(rs, expRS, true); st.executeUpdate( " drop table t1"); // check constraints with parameters st.executeUpdate( "create table t1(c1 int constraint asdf check(c1 = 1))"); pSt = prepareStatement( "insert into t1 values (?)"); rs = st.executeQuery( "values (1)"); rs.next(); rsmd = rs.getMetaData(); for (int i = 1; i <= rsmd.getColumnCount(); i++) pSt.setObject(i, rs.getObject(i)); assertUpdateCount(pSt, 1); // clean up st.executeUpdate( "drop table t1"); st.executeUpdate( " create table t1(active_flag char(2) " + "check(active_flag IN ('Y', 'N')), " + "araccount_active_flag char(2) " + "check(araccount_active_flag IN ('Y', 'N')), " + "automatic_refill_flag char(2) " + "check(automatic_refill_flag IN ('Y', 'N')), " + "call_when_ready_flag char(2) " + "check(call_when_ready_flag IN ('Y', 'N')), " + "compliance_flag char(2) check(compliance_flag IN " + "('Y', 'N')), delivery_flag char(2) " + "check(delivery_flag IN ('Y', 'N')), " + "double_count_flag char(2) check(double_count_flag " + "IN ('Y', 'N')), gender_ind char(2) check(gender_ind " + "IN ('M', 'F', 'U')), geriatric_flag char(2) " + "check(geriatric_flag IN ('Y', 'N')), " + "refuse_inquiry_flag char(2) " + "check(refuse_inquiry_flag IN ('Y', 'N')), " + "animal_flag char(2) check(animal_flag IN ('Y', " + "'N')), terminal_flag char(2) 
check(terminal_flag IN " + "('Y', 'N')), unit_flag char(2) check(unit_flag IN " + "('Y', 'N')), VIP_flag char(2) check(VIP_flag IN " + "('Y', 'N')), snap_cap_flag char(2) " + "check(snap_cap_flag IN ('Y', 'N')), " + "consent_on_file_flag char(2) " + "check(consent_on_file_flag IN ('Y', 'N')), " + "enlarged_SIG_flag char(2) check(enlarged_SIG_flag " + "IN ('Y', 'N')),aquired_patient_flag char(2) " + "check(aquired_patient_flag IN ('Y', 'N')))"); // bug 5622 - internal generated constraint names are // re-worked to match db2's naming convention. st.executeUpdate( "drop table t1"); st.executeUpdate( " create table t1 (c1 int not null primary key, c2 " + "int not null unique, c3 int check (c3>=0))"); st.executeUpdate( " alter table t1 add column c4 int not null default 1"); st.executeUpdate( " alter table t1 add constraint c4_unique UNIQUE(c4)"); st.executeUpdate( " alter table t1 add column c5 int check(c5 >= 0)"); rs = st.executeQuery( " select c.type from " + "sys.sysconstraints c, sys.systables t where " + "c.tableid = t.tableid and tablename='T1'"); expColNames = new String [] {"TYPE"}; JDBC.assertColumnNames(rs, expColNames); expRS = new String [][] { {"P"}, {"U"}, {"C"}, {"U"}, {"C"} }; JDBC.assertFullResultSet(rs, expRS, true); assertStatementError("42Y55", st, " drop table t2"); st.executeUpdate( " create table t2 (c21 int references t1)"); rs = st.executeQuery( " select c.type from " + "sys.sysconstraints c, sys.systables t where " + "c.tableid = t.tableid and tablename='T2'"); expColNames = new String [] {"TYPE"}; JDBC.assertColumnNames(rs, expColNames); expRS = new String [][] { {"F"} }; JDBC.assertFullResultSet(rs, expRS, true); assertStatementError("42Y55", st, " drop table t3"); st.executeUpdate( " create table t3 (c1 int check (c1 >= 0), c2 int " + "check (c2 >= 0), c3 int check (c3 >= 0), c4 int " + "check (c4 >= 0), c5 int check (c5 >= 0), c6 int " + "check (c6 >= 0), c7 int check (c7 >= 0), c8 int " + "check (c8 >= 0), c9 int check (c9 >= 0), c10 int " + "check (c10 >= 0), c11 int check (c11 >= 0), c12 int " + "check (c12 >= 0), c13 int check (c13 >= 0))"); rs = st.executeQuery( " select c.type from " + "sys.sysconstraints c, sys.systables t where " + "c.tableid = t.tableid and tablename='T3'"); expColNames = new String [] {"TYPE"}; JDBC.assertColumnNames(rs, expColNames); expRS = new String [][] { {"C"}, {"C"}, {"C"}, {"C"}, {"C"}, {"C"}, {"C"}, {"C"}, {"C"}, {"C"}, {"C"}, {"C"}, {"C"} }; JDBC.assertFullResultSet(rs, expRS, true); assertStatementError("42Y55", st, " drop table t4"); st.executeUpdate( " create table t4(c11 int not null, c12 int not " + "null, primary key (c11, c12))"); rs = st.executeQuery( " select c.type from " + "sys.sysconstraints c, sys.systables t where " + "c.tableid = t.tableid and tablename='T4'"); expColNames = new String [] {"TYPE"}; JDBC.assertColumnNames(rs, expColNames); expRS = new String [][] { {"P"} }; JDBC.assertFullResultSet(rs, expRS, true); // Cleanup: st.executeUpdate("drop table t4"); st.executeUpdate("drop table t3"); st.executeUpdate("drop table t2"); st.executeUpdate("drop table t1"); conn.commit(); // DERBY-2989 }
public void testBuiltInFunctions() throws SQLException{ st = createStatement(); conn=getConnection(); conn.setAutoCommit(false); // built-in functions in a check constraint st.executeUpdate( "create table charTab (c1 char(4) check(CHAR(c1) = c1))"); st.executeUpdate( " insert into charTab values 'asdf'"); st.executeUpdate( " insert into charTab values 'fdsa'"); // beetle 5805 - support built-in function INT should fail // until beetle 5805 is implemented st.executeUpdate( "create table intTab (c1 int check(INT(1) = c1))"); st.executeUpdate( " insert into intTab values 1"); // this insert should fail, does not satisfy check constraint assertStatementError("23513", st, "insert into intTab values 2"); st.executeUpdate( " create table maxIntTab (c1 int check(INT(2147483647) > c1))"); st.executeUpdate( " insert into maxIntTab values 1"); // this insert should fail, does not satisfy check constraint assertStatementError("23513", st, "insert into maxIntTab values 2147483647"); conn.rollback(); // verify that inserts, updates and statements with forced // constraints are indeed dependent on the constraints st.executeUpdate( "create table t1(c1 int not null constraint asdf primary key)"); st.executeUpdate( " insert into t1 values 1, 2, 3, 4, 5"); conn.commit(); PreparedStatement pSt1 = prepareStatement( "insert into t1 values 1"); PreparedStatement pSt2 = prepareStatement( "update t1 set c1 = 3 where c1 = 4"); PreparedStatement pSt3 = prepareStatement( "select * from t1"); // the insert and update should fail, select should succeed assertStatementError("23505", pSt1); assertStatementError("23505", pSt2); rs = pSt3.executeQuery(); expColNames = new String [] {"C1"}; JDBC.assertColumnNames(rs, expColNames); expRS = new String [][] { {"1"}, {"2"}, {"3"}, {"4"}, {"5"} }; JDBC.assertFullResultSet(rs, expRS, true); st.executeUpdate( " alter table t1 drop constraint asdf"); // rollback and verify that constraints are enforced and // select succeeds conn.rollback(); assertStatementError("23505", pSt1); assertStatementError("23505", pSt2); rs = pSt3.executeQuery(); expColNames = new String [] {"C1"}; JDBC.assertColumnNames(rs, expColNames); expRS = new String [][] { {"1"}, {"2"}, {"3"}, {"4"}, {"5"} }; JDBC.assertFullResultSet(rs, expRS, true); st.executeUpdate( " drop table t1"); // check constraints with parameters st.executeUpdate( "create table t1(c1 int constraint asdf check(c1 = 1))"); pSt = prepareStatement( "insert into t1 values (?)"); rs = st.executeQuery( "values (1)"); rs.next(); rsmd = rs.getMetaData(); for (int i = 1; i <= rsmd.getColumnCount(); i++) pSt.setObject(i, rs.getObject(i)); assertUpdateCount(pSt, 1); // clean up st.executeUpdate( "drop table t1"); st.executeUpdate( " create table t1(active_flag char(2) " + "check(active_flag IN ('Y', 'N')), " + "araccount_active_flag char(2) " + "check(araccount_active_flag IN ('Y', 'N')), " + "automatic_refill_flag char(2) " + "check(automatic_refill_flag IN ('Y', 'N')), " + "call_when_ready_flag char(2) " + "check(call_when_ready_flag IN ('Y', 'N')), " + "compliance_flag char(2) check(compliance_flag IN " + "('Y', 'N')), delivery_flag char(2) " + "check(delivery_flag IN ('Y', 'N')), " + "double_count_flag char(2) check(double_count_flag " + "IN ('Y', 'N')), gender_ind char(2) check(gender_ind " + "IN ('M', 'F', 'U')), geriatric_flag char(2) " + "check(geriatric_flag IN ('Y', 'N')), " + "refuse_inquiry_flag char(2) " + "check(refuse_inquiry_flag IN ('Y', 'N')), " + "animal_flag char(2) check(animal_flag IN ('Y', " + "'N')), terminal_flag char(2) 
check(terminal_flag IN " + "('Y', 'N')), unit_flag char(2) check(unit_flag IN " + "('Y', 'N')), VIP_flag char(2) check(VIP_flag IN " + "('Y', 'N')), snap_cap_flag char(2) " + "check(snap_cap_flag IN ('Y', 'N')), " + "consent_on_file_flag char(2) " + "check(consent_on_file_flag IN ('Y', 'N')), " + "enlarged_SIG_flag char(2) check(enlarged_SIG_flag " + "IN ('Y', 'N')),aquired_patient_flag char(2) " + "check(aquired_patient_flag IN ('Y', 'N')))"); // bug 5622 - internal generated constraint names are // re-worked to match db2's naming convention. st.executeUpdate( "drop table t1"); st.executeUpdate( " create table t1 (c1 int not null primary key, c2 " + "int not null unique, c3 int check (c3>=0))"); st.executeUpdate( " alter table t1 add column c4 int not null default 1"); st.executeUpdate( " alter table t1 add constraint c4_unique UNIQUE(c4)"); st.executeUpdate( " alter table t1 add column c5 int check(c5 >= 0)"); rs = st.executeQuery( " select c.type from " + "sys.sysconstraints c, sys.systables t where " + "c.tableid = t.tableid and tablename='T1'"); expColNames = new String [] {"TYPE"}; JDBC.assertColumnNames(rs, expColNames); expRS = new String [][] { {"P"}, {"U"}, {"C"}, {"U"}, {"C"} }; JDBC.assertUnorderedResultSet(rs, expRS, true); assertStatementError("42Y55", st, " drop table t2"); st.executeUpdate( " create table t2 (c21 int references t1)"); rs = st.executeQuery( " select c.type from " + "sys.sysconstraints c, sys.systables t where " + "c.tableid = t.tableid and tablename='T2'"); expColNames = new String [] {"TYPE"}; JDBC.assertColumnNames(rs, expColNames); expRS = new String [][] { {"F"} }; JDBC.assertFullResultSet(rs, expRS, true); assertStatementError("42Y55", st, " drop table t3"); st.executeUpdate( " create table t3 (c1 int check (c1 >= 0), c2 int " + "check (c2 >= 0), c3 int check (c3 >= 0), c4 int " + "check (c4 >= 0), c5 int check (c5 >= 0), c6 int " + "check (c6 >= 0), c7 int check (c7 >= 0), c8 int " + "check (c8 >= 0), c9 int check (c9 >= 0), c10 int " + "check (c10 >= 0), c11 int check (c11 >= 0), c12 int " + "check (c12 >= 0), c13 int check (c13 >= 0))"); rs = st.executeQuery( " select c.type from " + "sys.sysconstraints c, sys.systables t where " + "c.tableid = t.tableid and tablename='T3'"); expColNames = new String [] {"TYPE"}; JDBC.assertColumnNames(rs, expColNames); expRS = new String [][] { {"C"}, {"C"}, {"C"}, {"C"}, {"C"}, {"C"}, {"C"}, {"C"}, {"C"}, {"C"}, {"C"}, {"C"}, {"C"} }; JDBC.assertFullResultSet(rs, expRS, true); assertStatementError("42Y55", st, " drop table t4"); st.executeUpdate( " create table t4(c11 int not null, c12 int not " + "null, primary key (c11, c12))"); rs = st.executeQuery( " select c.type from " + "sys.sysconstraints c, sys.systables t where " + "c.tableid = t.tableid and tablename='T4'"); expColNames = new String [] {"TYPE"}; JDBC.assertColumnNames(rs, expColNames); expRS = new String [][] { {"P"} }; JDBC.assertFullResultSet(rs, expRS, true); // Cleanup: st.executeUpdate("drop table t4"); st.executeUpdate("drop table t3"); st.executeUpdate("drop table t2"); st.executeUpdate("drop table t1"); conn.commit(); // DERBY-2989 }
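The fix in this pair swaps an order-sensitive result-set assertion for an unordered one for the T1 constraint query, since rows selected from SYS.SYSCONSTRAINTS come back in no guaranteed order. A hedged sketch of an order-insensitive comparison of expected versus actual rows (plain Java, not Derby's JDBC test helper):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

final class UnorderedRowsSketch {
    /** True if the two row lists contain the same rows, ignoring row order. */
    static boolean sameRowsIgnoringOrder(List<List<String>> expected,
                                         List<List<String>> actual) {
        List<String> e = flatten(expected);
        List<String> a = flatten(actual);
        Collections.sort(e);
        Collections.sort(a);
        return e.equals(a);
    }

    private static List<String> flatten(List<List<String>> rows) {
        List<String> out = new ArrayList<>();
        for (List<String> row : rows) {
            out.add(String.join("|", row)); // one comparable string per row
        }
        return out;
    }
}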
public Object run() throws StandardException { switch( actionCode) { case GET_FILE_NAME_ACTION: return privGetFileName( actionIdentity, actionStub, actionErrorOK, actionTryAlternatePath); case CREATE_CONTAINER_ACTION: { StorageFile file = privGetFileName( actionIdentity, false, false, false); try { if (file.exists()) { // note I'm left in the no-identity state as fillInIdentity() // hasn't been called. throw StandardException.newException( SQLState.FILE_EXISTS, file); } } catch (SecurityException se) { throw StandardException.newException( SQLState.FILE_CREATE, se, file); } try { // OK not to force WAL here, in fact, this operation // preceeds the creation of the log record to ensure // sufficient space. dataFactory.writeInProgress(); try { fileData = file.getRandomAccessFile( "rw"); file.limitAccessToOwner(); } finally { dataFactory.writeFinished(); } // This container format specifies that the first page is // an allocation page and the container information is // stored within it. The allocation page needs to be // somewhat formatted because if the system crashed after // the create container log operation is written, it needs // to be well formed enough to get the container // information back out of it. // // Don't try to go thru the page cache here because the // container object cannot be found in the container cache // at this point yet. However, if we use the page cache // to store the first allocation page, then in order to // write itself out, it needs to ask the container to do // so, which is going to create a deadlock. The // allocation page cannot write itself out without going // thru the container because it doesn't know where its // offset is. Here we effectively hardwire page 0 at // offset 0 of the container file to be the first // allocation page. // create an embryonic page - if this is not a temporary // container, synchronously write out the file header. canUpdate = true; // Need to set it now. After writeRAFHeader // may be too late in case that method's IO // is interrupted and container needs // reopening. To get the correct "rw" mode // we need canUpdate to be true. writeRAFHeader( actionIdentity, fileData, true, (actionIdentity.getSegmentId() != ContainerHandle.TEMPORARY_SEGMENT)); } catch (IOException ioe) { canUpdate = false; boolean fileDeleted; try { fileDeleted = privRemoveFile(file); } catch (SecurityException se) { throw StandardException.newException( SQLState.FILE_CREATE_NO_CLEANUP, ioe, file, se.toString()); } if (!fileDeleted) { throw StandardException.newException( SQLState.FILE_CREATE_NO_CLEANUP, ioe, file, ioe.toString()); } throw StandardException.newException( SQLState.FILE_CREATE, ioe, file); } return null; } // end of case CREATE_CONTAINER_ACTION case REMOVE_FILE_ACTION: return privRemoveFile( actionFile) ? this : null; case OPEN_CONTAINER_ACTION: { boolean isStub = false; // is this a stub? StorageFile file = privGetFileName( actionIdentity, false, true, true); if (file == null) return null; try { if (!file.exists()) { // file does not exist, may be it has been stubbified file = privGetFileName( actionIdentity, true, true, true); if (!file.exists()) return null; isStub = true; } } catch (SecurityException se) { throw StandardException.newException( SQLState.DATA_UNEXPECTED_EXCEPTION, se); } canUpdate = false; try { if (!dataFactory.isReadOnly() && file.canWrite()) canUpdate = true; } catch (SecurityException se) { // just means we can't write to it. } fileName = file.toString(); try { fileData = file.getRandomAccessFile(canUpdate ? 
"rw" : "r"); readHeader(getEmbryonicPage(fileData, FIRST_ALLOC_PAGE_OFFSET)); if (SanityManager.DEBUG) { if (isStub) SanityManager.ASSERT(getDroppedState() && getCommittedDropState(), "a stub failed to set drop state"); } } catch (IOException ioe) { if (isStub) { throw dataFactory. markCorrupt(StandardException. newException(SQLState. FILE_CONTAINER_EXCEPTION, ioe, getIdentity() != null ? getIdentity().toString() : "unknown", "read", fileName)); } // maybe it is being stubbified... try that StorageFile stub = privGetFileName(actionIdentity, true, true, true); if (stub.exists()) { try { boolean delete_status = privRemoveFile(file); if (SanityManager.DEBUG) { if (!delete_status) { SanityManager.THROWASSERT( "delete of file (" + file + ") failed."); } } fileData = stub.getRandomAccessFile(canUpdate ? "rw" : "r"); readHeader(getEmbryonicPage(fileData, FIRST_ALLOC_PAGE_OFFSET)); } catch (IOException ioe2) { throw dataFactory. markCorrupt(StandardException. newException(SQLState. FILE_CONTAINER_EXCEPTION, ioe2, getIdentity() != null ? getIdentity().toString() : "unknown", "delete-stub", fileName)); } // RESOLVE: this is a temporary hack } else throw dataFactory. markCorrupt(StandardException. newException(SQLState. FILE_CONTAINER_EXCEPTION, ioe, getIdentity() != null ? getIdentity().toString() : "unknown", "read", fileName)); } return this; } // end of case OPEN_CONTAINER_ACTION case REOPEN_CONTAINER_ACTION: { StorageFile file = privGetFileName( actionIdentity, false, true, true); synchronized (this) { try { fileData = file.getRandomAccessFile(canUpdate ? "rw" : "r"); } catch (FileNotFoundException ioe) { throw dataFactory. markCorrupt( StandardException.newException( SQLState.FILE_CONTAINER_EXCEPTION, ioe, (getIdentity() != null ? getIdentity().toString() : "unknown"), "read", fileName)); } } return this; } case STUBBIFY_ACTION: { StorageFile file = privGetFileName( actionIdentity, false, false, true); StorageFile stub = privGetFileName( actionIdentity, true, false, false); StorageRandomAccessFile stubData = null; try { // !!!!! // bumpContainerVersion(); // // do NOT bump the container version. We WANT the stubbify // operation to get redone every time. This is because this // operation first writes out the stub and then remove the // container file. If we bump the version, then the stub will // contain the new version. And if the system crashes right then, // then we will skip the whole operation during redo even though // the container file may not have been removed. Since we don't // want to have the remove happen before the stub is written, we // cannot sync it and therefore cannot be sure the remove // happened before the system crashed. if (!stub.exists()) { // write the header to the stub stubData = stub.getRandomAccessFile( "rw"); stub.limitAccessToOwner(); writeRAFHeader( actionIdentity, stubData, true, /* create */ true); /* sync */ stubData.close(); stubData = null; } // Force WAL and check for database corruption before removing file. // This is one operation where the container is changed on disk // directly without going thru the container cache, which otherwise // would have force WAL. Take care of it here. dataFactory.flush(actionInstant); // try to remove the container file // fileDate is not null only if we are redoing a removeContainer // (stubbify) operation. Then fileData acutally is opened against // the stub and the original container file does not exist. 
// Then we need to close it here because this method is called by // cache.remove and nobody will be able to see fileData after this. privRemoveFile(file); } catch (SecurityException se) { throw StandardException. newException(SQLState.FILE_CANNOT_REMOVE_FILE, se, file, se.toString()); } catch (IOException ioe) { // exception thrown while in creating the stub. Remove the // (half-baked) stub try { if (stubData != null) { stubData.close(); stub.delete(); stubData = null; } if (fileData != null) { fileData.close(); fileData = null; } } catch (IOException ioe2) { throw StandardException.newException( SQLState.FILE_CANNOT_REMOVE_FILE, ioe2, file, ioe.toString()); } catch (SecurityException se) { throw StandardException.newException( SQLState.FILE_CANNOT_REMOVE_FILE, se, file, stub); } } //let the data factory know about this the stub file;It // could remove when next checkpoint occurs if it's not necessary for recovery dataFactory.stubFileToRemoveAfterCheckPoint(stub,actionInstant, getIdentity()); return null; } // end of case STUBBIFY_ACTION case GET_RANDOM_ACCESS_FILE_ACTION: { try { boolean exists = actionFile.exists(); Object result = actionFile.getRandomAccessFile("rw"); if (!exists) { actionFile.limitAccessToOwner(); } return result; } catch (FileNotFoundException fnfe) { throw StandardException.newException( SQLState.FILE_CREATE, fnfe, actionFile.getPath()); } } // end of case BACKUP_CONTAINER_ACTION } // end of switch return null; } // end of run
public Object run() throws StandardException { switch( actionCode) { case GET_FILE_NAME_ACTION: return privGetFileName( actionIdentity, actionStub, actionErrorOK, actionTryAlternatePath); case CREATE_CONTAINER_ACTION: { StorageFile file = privGetFileName( actionIdentity, false, false, false); try { if (file.exists()) { // note I'm left in the no-identity state as fillInIdentity() // hasn't been called. throw StandardException.newException( SQLState.FILE_EXISTS, file); } } catch (SecurityException se) { throw StandardException.newException( SQLState.FILE_CREATE, se, file); } try { // OK not to force WAL here, in fact, this operation // preceeds the creation of the log record to ensure // sufficient space. dataFactory.writeInProgress(); try { fileData = file.getRandomAccessFile( "rw"); file.limitAccessToOwner(); } finally { dataFactory.writeFinished(); } // This container format specifies that the first page is // an allocation page and the container information is // stored within it. The allocation page needs to be // somewhat formatted because if the system crashed after // the create container log operation is written, it needs // to be well formed enough to get the container // information back out of it. // // Don't try to go thru the page cache here because the // container object cannot be found in the container cache // at this point yet. However, if we use the page cache // to store the first allocation page, then in order to // write itself out, it needs to ask the container to do // so, which is going to create a deadlock. The // allocation page cannot write itself out without going // thru the container because it doesn't know where its // offset is. Here we effectively hardwire page 0 at // offset 0 of the container file to be the first // allocation page. // create an embryonic page - if this is not a temporary // container, synchronously write out the file header. canUpdate = true; // Need to set it now. After writeRAFHeader // may be too late in case that method's IO // is interrupted and container needs // reopening. To get the correct "rw" mode // we need canUpdate to be true. writeRAFHeader( actionIdentity, fileData, true, (actionIdentity.getSegmentId() != ContainerHandle.TEMPORARY_SEGMENT)); } catch (IOException ioe) { canUpdate = false; boolean fileDeleted; try { fileDeleted = privRemoveFile(file); } catch (SecurityException se) { throw StandardException.newException( SQLState.FILE_CREATE_NO_CLEANUP, ioe, file, se.toString()); } if (!fileDeleted) { throw StandardException.newException( SQLState.FILE_CREATE_NO_CLEANUP, ioe, file, ioe.toString()); } throw StandardException.newException( SQLState.FILE_CREATE, ioe, file); } return null; } // end of case CREATE_CONTAINER_ACTION case REMOVE_FILE_ACTION: return privRemoveFile( actionFile) ? this : null; case OPEN_CONTAINER_ACTION: { boolean isStub = false; // is this a stub? StorageFile file = privGetFileName( actionIdentity, false, true, true); if (file == null) return null; try { if (!file.exists()) { // file does not exist, may be it has been stubbified file = privGetFileName( actionIdentity, true, true, true); if (!file.exists()) return null; isStub = true; } } catch (SecurityException se) { throw StandardException.newException( SQLState.DATA_UNEXPECTED_EXCEPTION, se); } canUpdate = false; try { if (!dataFactory.isReadOnly() && file.canWrite()) canUpdate = true; } catch (SecurityException se) { // just means we can't write to it. } fileName = file.toString(); try { fileData = file.getRandomAccessFile(canUpdate ? 
"rw" : "r"); readHeader(getEmbryonicPage(fileData, FIRST_ALLOC_PAGE_OFFSET)); if (SanityManager.DEBUG) { if (isStub) SanityManager.ASSERT(getDroppedState() && getCommittedDropState(), "a stub failed to set drop state"); } } catch (IOException ioe) { if (isStub) { throw dataFactory. markCorrupt(StandardException. newException(SQLState. FILE_CONTAINER_EXCEPTION, ioe, getIdentity() != null ? getIdentity().toString() : "unknown", "read", fileName)); } // maybe it is being stubbified... try that StorageFile stub = privGetFileName(actionIdentity, true, true, true); if (stub.exists()) { try { boolean delete_status = privRemoveFile(file); if (SanityManager.DEBUG) { if (!delete_status) { SanityManager.THROWASSERT( "delete of file (" + file + ") failed."); } } fileData = stub.getRandomAccessFile(canUpdate ? "rw" : "r"); readHeader(getEmbryonicPage(fileData, FIRST_ALLOC_PAGE_OFFSET)); } catch (IOException ioe2) { throw dataFactory. markCorrupt(StandardException. newException(SQLState. FILE_CONTAINER_EXCEPTION, ioe2, getIdentity() != null ? getIdentity().toString() : "unknown", "delete-stub", fileName)); } // RESOLVE: this is a temporary hack } else throw dataFactory. markCorrupt(StandardException. newException(SQLState. FILE_CONTAINER_EXCEPTION, ioe, getIdentity() != null ? getIdentity().toString() : "unknown", "read", fileName)); } return this; } // end of case OPEN_CONTAINER_ACTION case REOPEN_CONTAINER_ACTION: { StorageFile file = privGetFileName( actionIdentity, false, true, true); synchronized (this) { try { fileData = file.getRandomAccessFile(canUpdate ? "rw" : "r"); } catch (FileNotFoundException ioe) { throw dataFactory. markCorrupt( StandardException.newException( SQLState.FILE_CONTAINER_EXCEPTION, ioe, (getIdentity() != null ? getIdentity().toString() : "unknown"), "read", fileName)); } } return this; } case STUBBIFY_ACTION: { StorageFile file = privGetFileName( actionIdentity, false, false, true); StorageFile stub = privGetFileName( actionIdentity, true, false, false); StorageRandomAccessFile stubData = null; try { // !!!!! // bumpContainerVersion(); // // do NOT bump the container version. We WANT the stubbify // operation to get redone every time. This is because this // operation first writes out the stub and then remove the // container file. If we bump the version, then the stub will // contain the new version. And if the system crashes right then, // then we will skip the whole operation during redo even though // the container file may not have been removed. Since we don't // want to have the remove happen before the stub is written, we // cannot sync it and therefore cannot be sure the remove // happened before the system crashed. if (!stub.exists()) { // write the header to the stub stubData = stub.getRandomAccessFile( "rw"); stub.limitAccessToOwner(); writeRAFHeader( actionIdentity, stubData, true, /* create */ true); /* sync */ stubData.close(); stubData = null; } // Force WAL and check for database corruption before removing file. // This is one operation where the container is changed on disk // directly without going thru the container cache, which otherwise // would have force WAL. Take care of it here. dataFactory.flush(actionInstant); // try to remove the container file // fileDate is not null only if we are redoing a removeContainer // (stubbify) operation. Then fileData acutally is opened against // the stub and the original container file does not exist. 
// Then we need to close it here because this method is called by // cache.remove and nobody will be able to see fileData after this. privRemoveFile(file); } catch (SecurityException se) { throw StandardException. newException(SQLState.FILE_CANNOT_REMOVE_FILE, se, file, se.toString()); } catch (IOException ioe) { // exception thrown while in creating the stub. Remove the // (half-baked) stub try { if (stubData != null) { stubData.close(); stub.delete(); stubData = null; } if (fileData != null) { fileData.close(); fileData = null; } } catch (IOException ioe2) { throw StandardException.newException( SQLState.FILE_CANNOT_REMOVE_FILE, ioe2, file, ioe.toString()); } catch (SecurityException se) { throw StandardException.newException( SQLState.FILE_CANNOT_REMOVE_FILE, se, file, se.toString()); } } //let the data factory know about this the stub file;It // could remove when next checkpoint occurs if it's not necessary for recovery dataFactory.stubFileToRemoveAfterCheckPoint(stub,actionInstant, getIdentity()); return null; } // end of case STUBBIFY_ACTION case GET_RANDOM_ACCESS_FILE_ACTION: { try { boolean exists = actionFile.exists(); Object result = actionFile.getRandomAccessFile("rw"); if (!exists) { actionFile.limitAccessToOwner(); } return result; } catch (FileNotFoundException fnfe) { throw StandardException.newException( SQLState.FILE_CREATE, fnfe, actionFile.getPath()); } } // end of case BACKUP_CONTAINER_ACTION } // end of switch return null; } // end of run
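In this pair the only code change is the last argument passed to the SecurityException error message (se.toString() instead of the stub file). The surrounding STUBBIFY logic writes a stub, forces it to disk, then removes the container, cleaning up a half-written stub if the write fails. A simplified, self-contained sketch of that write-then-delete ordering (java.nio.file, not Derby's StorageFactory):

import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

final class StubThenDeleteSketch {
    /** Writes a small stub marker durably, then removes the original file. */
    static void stubbify(Path original, Path stub) throws IOException {
        try (FileChannel ch = FileChannel.open(stub,
                StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE)) {
            ch.write(StandardCharsets.UTF_8.encode("dropped"));
            ch.force(true);                    // stub must hit disk before the delete
        } catch (IOException ioe) {
            Files.deleteIfExists(stub);        // clean up the half-written stub
            throw ioe;
        }
        Files.delete(original);                // safe: the stub already survives a crash
    }
}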
public void removeOldVersionOfContainers() throws StandardException
{
    // Find the old version of the container files and delete them.
    String[] files = dataFactory.getContainerNames();
    if (files != null)
    {
        // Loop through all the files in seg0 and
        // delete all old copies of the containers.
        for (int i = files.length-1; i >= 0 ; i--)
        {
            if (isOldContainerFile(files[i]))
            {
                StorageFile oldFile = getFile(files[i]);
                if (!privDelete(oldFile))
                {
                    throw StandardException.newException(
                        SQLState.FILE_CANNOT_REMOVE_FILE, oldFile);
                }
            }
        }
    }
}
public void removeOldVersionOfContainers() throws StandardException
{
    // Find the old version of the container files and delete them.
    String[] files = dataFactory.getContainerNames();
    if (files != null)
    {
        // Loop through all the files in seg0 and
        // delete all old copies of the containers.
        for (int i = files.length-1; i >= 0 ; i--)
        {
            if (isOldContainerFile(files[i]))
            {
                StorageFile oldFile = getFile(files[i]);
                if (!privDelete(oldFile))
                {
                    throw StandardException.newException(
                        SQLState.FILE_CANNOT_REMOVE_ENCRYPT_FILE, oldFile);
                }
            }
        }
    }
}
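The fix here only changes the SQLState raised when a delete fails, using the encryption-specific FILE_CANNOT_REMOVE_ENCRYPT_FILE. A minimal sketch of the surrounding pattern, deleting files that match an "old container" predicate and failing loudly on the first file that cannot be removed (the naming check is a hypothetical stand-in, plain java.io):

import java.io.File;
import java.io.IOException;

final class OldFileCleanupSketch {
    /** Deletes every file in dir whose name matches isOld; fails on the first miss. */
    static void removeOldVersions(File dir) throws IOException {
        File[] files = dir.listFiles();
        if (files == null) return;               // dir missing or not a directory
        for (File f : files) {
            if (isOld(f.getName()) && !f.delete()) {
                throw new IOException("cannot remove old container file: " + f);
            }
        }
    }

    // Assumption: stand-in for Derby's isOldContainerFile naming check.
    private static boolean isOld(String name) {
        return name.startsWith("o") && name.endsWith(".dat");
    }
}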
public void testSize() {
  assertEquals("size", 3, getTestVector().getNumNondefaultElements());
}
public void testSize() {
  assertEquals("size", 3, getTestVector().getNumNonZeroElements());
}
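The fix renames the call from getNumNondefaultElements to getNumNonZeroElements; for a vector whose default value is zero the two counts coincide, which is what the assertion relies on. A tiny sketch of the nonzero count over a plain array (illustration only, not Mahout's Vector API):

final class NonZeroCountSketch {
    /** Counts entries that differ from zero. */
    static int numNonZero(double[] v) {
        int n = 0;
        for (double x : v) if (x != 0.0) n++;
        return n;
    }

    public static void main(String[] args) {
        double[] v = {1.1, 0.0, 2.2, 0.0, 3.3};
        System.out.println(numNonZero(v)); // 3
    }
}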
public LocalizedOutput(OutputStream o, String enc) throws UnsupportedEncodingException {
    super(new OutputStreamWriter(o, enc), true);
    out = o;
}
public LocalizedOutput(OutputStream o){
    super(new OutputStreamWriter(o), true);
    out = o;
}

LocalizedOutput(OutputStream o, String enc) throws UnsupportedEncodingException {
    super(new OutputStreamWriter(o, enc), true);
    out = o;
}
public void useExistingCollation(TableDescriptor td) throws StandardException { ColumnDescriptor cd; // First verify that the column exists cd = td.getColumnDescriptor(name); if (cd == null) { throw StandardException.newException(SQLState.LANG_COLUMN_NOT_FOUND_IN_TABLE, name, td.getName()); } //getType() == null means we are dealing with drop column and hence //no need to worry about collation info if (getType() != null) { if (getType().getTypeId().isStringTypeId()) { setCollationType(cd.getType().getCollationType()); } } } /** * Get the action associated with this node. * * @return The action associated with this node. */ int getAction() { switch (getNodeType()) { case C_NodeTypes.MODIFY_COLUMN_DEFAULT_NODE: if (autoinc_create_or_modify_Start_Increment == ColumnDefinitionNode.MODIFY_AUTOINCREMENT_RESTART_VALUE) return ColumnInfo.MODIFY_COLUMN_DEFAULT_RESTART; else if (autoinc_create_or_modify_Start_Increment == ColumnDefinitionNode.MODIFY_AUTOINCREMENT_INC_VALUE) return ColumnInfo.MODIFY_COLUMN_DEFAULT_INCREMENT; else return ColumnInfo.MODIFY_COLUMN_DEFAULT_VALUE; case C_NodeTypes.MODIFY_COLUMN_TYPE_NODE: return ColumnInfo.MODIFY_COLUMN_TYPE; case C_NodeTypes.MODIFY_COLUMN_CONSTRAINT_NODE: return ColumnInfo.MODIFY_COLUMN_CONSTRAINT; case C_NodeTypes.MODIFY_COLUMN_CONSTRAINT_NOT_NULL_NODE: return ColumnInfo.MODIFY_COLUMN_CONSTRAINT_NOT_NULL; case C_NodeTypes.DROP_COLUMN_NODE: return ColumnInfo.DROP; default: if (SanityManager.DEBUG) { SanityManager.THROWASSERT("Unexpected nodeType = " + getNodeType()); } return 0; } } /** * Check the validity of the default, if any, for this node. * * @param dd The DataDictionary. * @param td The TableDescriptor. * * @exception StandardException Thrown on error */ void bindAndValidateDefault(DataDictionary dd, TableDescriptor td) throws StandardException { ColumnDescriptor cd; // First verify that the column exists cd = td.getColumnDescriptor(name); if (cd == null) { throw StandardException.newException(SQLState.LANG_COLUMN_NOT_FOUND_IN_TABLE, name, td.getName()); } // Get the UUID for the old default DefaultDescriptor defaultDescriptor = cd.getDefaultDescriptor(dd); oldDefaultUUID = (defaultDescriptor == null) ? null : defaultDescriptor.getUUID(); // Remember the column position columnPosition = cd.getPosition(); // No other work to do if no user specified default if (getNodeType() != C_NodeTypes.MODIFY_COLUMN_DEFAULT_NODE) { return; } // If the statement is not setting the column's default, then // recover the old default and re-use it. If the statement is // changing the start value for the auto-increment, then recover // the old increment-by value and re-use it. If the statement is // changing the increment-by value, then recover the old start value // and re-use it. This way, the column alteration only changes the // aspects of the autoincrement settings that it intends to change, // and does not lose the other aspecs. if (defaultNode == null) { defaultInfo = (DefaultInfoImpl)cd.getDefaultInfo(); } else { if ( cd.hasGenerationClause() ) { throw StandardException.newException( SQLState.LANG_GEN_COL_DEFAULT, cd.getColumnName() ); } } if (autoinc_create_or_modify_Start_Increment == ColumnDefinitionNode.MODIFY_AUTOINCREMENT_RESTART_VALUE) autoincrementIncrement = cd.getAutoincInc(); if (autoinc_create_or_modify_Start_Increment == ColumnDefinitionNode.MODIFY_AUTOINCREMENT_INC_VALUE) autoincrementStart = cd.getAutoincStart(); /* Fill in the DataTypeServices from the DataDictionary */ type = cd.getType(); // Now validate the default validateDefault(dd, td); }
public void useExistingCollation(TableDescriptor td) throws StandardException { ColumnDescriptor cd; // First verify that the column exists cd = td.getColumnDescriptor(name); if (cd == null) { throw StandardException.newException(SQLState.LANG_COLUMN_NOT_FOUND_IN_TABLE, name, td.getName()); } //getType() == null means we are dealing with drop column and hence //no need to worry about collation info if (getType() != null) { if (getType().getTypeId().isStringTypeId()) { setCollationType(cd.getType().getCollationType()); } } } /** * Get the action associated with this node. * * @return The action associated with this node. */ int getAction() { switch (getNodeType()) { case C_NodeTypes.MODIFY_COLUMN_DEFAULT_NODE: if (autoinc_create_or_modify_Start_Increment == ColumnDefinitionNode.MODIFY_AUTOINCREMENT_RESTART_VALUE) return ColumnInfo.MODIFY_COLUMN_DEFAULT_RESTART; else if (autoinc_create_or_modify_Start_Increment == ColumnDefinitionNode.MODIFY_AUTOINCREMENT_INC_VALUE) return ColumnInfo.MODIFY_COLUMN_DEFAULT_INCREMENT; else return ColumnInfo.MODIFY_COLUMN_DEFAULT_VALUE; case C_NodeTypes.MODIFY_COLUMN_TYPE_NODE: return ColumnInfo.MODIFY_COLUMN_TYPE; case C_NodeTypes.MODIFY_COLUMN_CONSTRAINT_NODE: return ColumnInfo.MODIFY_COLUMN_CONSTRAINT; case C_NodeTypes.MODIFY_COLUMN_CONSTRAINT_NOT_NULL_NODE: return ColumnInfo.MODIFY_COLUMN_CONSTRAINT_NOT_NULL; case C_NodeTypes.DROP_COLUMN_NODE: return ColumnInfo.DROP; default: if (SanityManager.DEBUG) { SanityManager.THROWASSERT("Unexpected nodeType = " + getNodeType()); } return 0; } } /** * Check the validity of the default, if any, for this node. * * @param dd The DataDictionary. * @param td The TableDescriptor. * * @exception StandardException Thrown on error */ void bindAndValidateDefault(DataDictionary dd, TableDescriptor td) throws StandardException { ColumnDescriptor cd; // First verify that the column exists cd = td.getColumnDescriptor(name); if (cd == null) { throw StandardException.newException(SQLState.LANG_COLUMN_NOT_FOUND_IN_TABLE, name, td.getName()); } // Get the UUID for the old default DefaultDescriptor defaultDescriptor = cd.getDefaultDescriptor(dd); oldDefaultUUID = (defaultDescriptor == null) ? null : defaultDescriptor.getUUID(); // Remember the column position columnPosition = cd.getPosition(); // No other work to do if no user specified default if (getNodeType() != C_NodeTypes.MODIFY_COLUMN_DEFAULT_NODE) { return; } // If the statement is not setting the column's default, then // recover the old default and re-use it. If the statement is // changing the start value for the auto-increment, then recover // the old increment-by value and re-use it. If the statement is // changing the increment-by value, then recover the old start value // and re-use it. This way, the column alteration only changes the // aspects of the autoincrement settings that it intends to change, // and does not lose the other aspecs. if (keepCurrentDefault) { defaultInfo = (DefaultInfoImpl)cd.getDefaultInfo(); } else { if ( cd.hasGenerationClause() ) { throw StandardException.newException( SQLState.LANG_GEN_COL_DEFAULT, cd.getColumnName() ); } } if (autoinc_create_or_modify_Start_Increment == ColumnDefinitionNode.MODIFY_AUTOINCREMENT_RESTART_VALUE) autoincrementIncrement = cd.getAutoincInc(); if (autoinc_create_or_modify_Start_Increment == ColumnDefinitionNode.MODIFY_AUTOINCREMENT_INC_VALUE) autoincrementStart = cd.getAutoincStart(); /* Fill in the DataTypeServices from the DataDictionary */ type = cd.getType(); // Now validate the default validateDefault(dd, td); }
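In this pair the fix replaces the defaultNode == null test with an explicit keepCurrentDefault flag, so "reuse the existing default" is stated as intent rather than inferred from a missing parse node. A tiny sketch of that refactoring pattern (hypothetical names, not Derby's):

// Sketch: encode "keep the current value" as an explicit flag instead of
// inferring it from a null reference (names here are hypothetical).
final class DefaultChoiceSketch {
    static String resolveDefault(boolean keepCurrentDefault,
                                 String currentDefault,
                                 String newDefault) {
        return keepCurrentDefault ? currentDefault : newDefault;
    }
}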
public static void main(String[] args) throws Throwable { int minRow; int maxRow; String rowPrefix, keyspace = "Keyspace1"; if (args.length > 0) { keyspace = args[0]; rowPrefix = args[1]; minRow = Integer.parseInt(args[2]); maxRow = minRow + 1; } else { minRow = 1; maxRow = 10; rowPrefix = "row"; } TestRingCache tester = new TestRingCache(keyspace); for (int nRows = minRow; nRows < maxRow; nRows++) { ByteBuffer row = ByteBuffer.wrap((rowPrefix + nRows).getBytes()); ColumnPath col = new ColumnPath("Standard1").setSuper_column((ByteBuffer)null).setColumn("col1".getBytes()); ColumnParent parent = new ColumnParent("Standard1").setSuper_column((ByteBuffer)null); Collection<InetAddress> endpoints = tester.ringCache.getEndpoint(row); InetAddress firstEndpoint = endpoints.iterator().next(); System.out.printf("hosts with key %s : %s; choose %s%n", new String(row.array()), StringUtils.join(endpoints, ","), firstEndpoint); // now, read the row back directly from the host owning the row locally tester.setup(firstEndpoint.getHostAddress(), DatabaseDescriptor.getRpcPort()); tester.thriftClient.set_keyspace(keyspace); tester.thriftClient.insert(row, parent, new Column(ByteBufferUtil.bytes("col1"), ByteBufferUtil.bytes("val1"), 1), ConsistencyLevel.ONE); Column column = tester.thriftClient.get(row, col, ConsistencyLevel.ONE).column; System.out.println("read row " + new String(row.array()) + " " + new String(column.name.array()) + ":" + new String(column.value.array()) + ":" + column.timestamp); } System.exit(1); }
public static void main(String[] args) throws Throwable { int minRow; int maxRow; String rowPrefix, keyspace = "Keyspace1"; if (args.length > 0) { keyspace = args[0]; rowPrefix = args[1]; minRow = Integer.parseInt(args[2]); maxRow = minRow + 1; } else { minRow = 1; maxRow = 10; rowPrefix = "row"; } TestRingCache tester = new TestRingCache(keyspace); for (int nRows = minRow; nRows < maxRow; nRows++) { ByteBuffer row = ByteBufferUtil.bytes((rowPrefix + nRows)); ColumnPath col = new ColumnPath("Standard1").setSuper_column((ByteBuffer)null).setColumn("col1".getBytes()); ColumnParent parent = new ColumnParent("Standard1").setSuper_column((ByteBuffer)null); Collection<InetAddress> endpoints = tester.ringCache.getEndpoint(row); InetAddress firstEndpoint = endpoints.iterator().next(); System.out.printf("hosts with key %s : %s; choose %s%n", new String(row.array()), StringUtils.join(endpoints, ","), firstEndpoint); // now, read the row back directly from the host owning the row locally tester.setup(firstEndpoint.getHostAddress(), DatabaseDescriptor.getRpcPort()); tester.thriftClient.set_keyspace(keyspace); tester.thriftClient.insert(row, parent, new Column(ByteBufferUtil.bytes("col1"), ByteBufferUtil.bytes("val1"), 1), ConsistencyLevel.ONE); Column column = tester.thriftClient.get(row, col, ConsistencyLevel.ONE).column; System.out.println("read row " + new String(row.array()) + " " + new String(column.name.array()) + ":" + new String(column.value.array()) + ":" + column.timestamp); } System.exit(1); }
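This pair and several that follow make the same change: replacing ByteBuffer.wrap(str.getBytes()), which silently uses the platform default charset, with ByteBufferUtil.bytes(str), which encodes with a fixed charset. A standalone sketch of why the explicit charset matters (plain JDK, not Cassandra's ByteBufferUtil):

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

final class StringToBytesSketch {
    /** Deterministic conversion: always UTF-8, regardless of the JVM's default charset. */
    static ByteBuffer utf8(String s) {
        return ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8));
    }

    public static void main(String[] args) {
        // "row1".getBytes() depends on -Dfile.encoding; the call below does not.
        System.out.println(utf8("row1").remaining()); // 4
    }
}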
public void testRowIteration() throws IOException, ExecutionException, InterruptedException { Table table = Table.open(TABLE1); ColumnFamilyStore store = table.getColumnFamilyStore("Super3"); final int ROWS_PER_SSTABLE = 10; Set<DecoratedKey> inserted = new HashSet<DecoratedKey>(); for (int i = 0; i < ROWS_PER_SSTABLE; i++) { DecoratedKey key = Util.dk(String.valueOf(i)); RowMutation rm = new RowMutation(TABLE1, key.key); rm.add(new QueryPath("Super3", ByteBufferUtil.bytes("sc"), ByteBuffer.wrap(String.valueOf(i).getBytes())), ByteBuffer.wrap(new byte[ROWS_PER_SSTABLE * 10 - i * 2]), i); rm.apply(); inserted.add(key); } store.forceBlockingFlush(); assertEquals(inserted.toString(), inserted.size(), Util.getRangeSlice(store).size()); }
public void testRowIteration() throws IOException, ExecutionException, InterruptedException { Table table = Table.open(TABLE1); ColumnFamilyStore store = table.getColumnFamilyStore("Super3"); final int ROWS_PER_SSTABLE = 10; Set<DecoratedKey> inserted = new HashSet<DecoratedKey>(); for (int i = 0; i < ROWS_PER_SSTABLE; i++) { DecoratedKey key = Util.dk(String.valueOf(i)); RowMutation rm = new RowMutation(TABLE1, key.key); rm.add(new QueryPath("Super3", ByteBufferUtil.bytes("sc"), ByteBufferUtil.bytes(String.valueOf(i))), ByteBuffer.wrap(new byte[ROWS_PER_SSTABLE * 10 - i * 2]), i); rm.apply(); inserted.add(key); } store.forceBlockingFlush(); assertEquals(inserted.toString(), inserted.size(), Util.getRangeSlice(store).size()); }
public void testTimeSort() throws IOException, ExecutionException, InterruptedException { Table table = Table.open("Keyspace1"); ColumnFamilyStore cfStore = table.getColumnFamilyStore("StandardLong1"); for (int i = 900; i < 1000; ++i) { RowMutation rm = new RowMutation("Keyspace1", ByteBuffer.wrap(Integer.toString(i).getBytes())); for (int j = 0; j < 8; ++j) { rm.add(new QueryPath("StandardLong1", null, getBytes(j * 2)), ByteBufferUtil.bytes("a"), j * 2); } rm.apply(); } validateTimeSort(table); cfStore.forceBlockingFlush(); validateTimeSort(table); // interleave some new data to test memtable + sstable DecoratedKey key = Util.dk("900"); RowMutation rm = new RowMutation("Keyspace1", key.key); for (int j = 0; j < 4; ++j) { rm.add(new QueryPath("StandardLong1", null, getBytes(j * 2 + 1)), ByteBufferUtil.bytes("b"), j * 2 + 1); } rm.apply(); // and some overwrites rm = new RowMutation("Keyspace1", key.key); rm.add(new QueryPath("StandardLong1", null, getBytes(0)), ByteBufferUtil.bytes("c"), 100); rm.add(new QueryPath("StandardLong1", null, getBytes(10)), ByteBufferUtil.bytes("c"), 100); rm.apply(); // verify ColumnFamily cf = cfStore.getColumnFamily(key, new QueryPath("StandardLong1"), getBytes(0), ByteBufferUtil.EMPTY_BYTE_BUFFER, false, 1000); Collection<IColumn> columns = cf.getSortedColumns(); assertEquals(12, columns.size()); Iterator<IColumn> iter = columns.iterator(); IColumn column; for (int j = 0; j < 8; j++) { column = iter.next(); assert column.name().equals(getBytes(j)); } TreeSet<ByteBuffer> columnNames = new TreeSet<ByteBuffer>(LongType.instance); columnNames.add(getBytes(10)); columnNames.add(getBytes(0)); cf = cfStore.getColumnFamily(QueryFilter.getNamesFilter(Util.dk("900"), new QueryPath("StandardLong1"), columnNames)); assert "c".equals(new String(cf.getColumn(getBytes(0)).value().array())); assert "c".equals(new String(cf.getColumn(getBytes(10)).value().array())); }
public void testTimeSort() throws IOException, ExecutionException, InterruptedException { Table table = Table.open("Keyspace1"); ColumnFamilyStore cfStore = table.getColumnFamilyStore("StandardLong1"); for (int i = 900; i < 1000; ++i) { RowMutation rm = new RowMutation("Keyspace1", ByteBufferUtil.bytes(Integer.toString(i))); for (int j = 0; j < 8; ++j) { rm.add(new QueryPath("StandardLong1", null, getBytes(j * 2)), ByteBufferUtil.bytes("a"), j * 2); } rm.apply(); } validateTimeSort(table); cfStore.forceBlockingFlush(); validateTimeSort(table); // interleave some new data to test memtable + sstable DecoratedKey key = Util.dk("900"); RowMutation rm = new RowMutation("Keyspace1", key.key); for (int j = 0; j < 4; ++j) { rm.add(new QueryPath("StandardLong1", null, getBytes(j * 2 + 1)), ByteBufferUtil.bytes("b"), j * 2 + 1); } rm.apply(); // and some overwrites rm = new RowMutation("Keyspace1", key.key); rm.add(new QueryPath("StandardLong1", null, getBytes(0)), ByteBufferUtil.bytes("c"), 100); rm.add(new QueryPath("StandardLong1", null, getBytes(10)), ByteBufferUtil.bytes("c"), 100); rm.apply(); // verify ColumnFamily cf = cfStore.getColumnFamily(key, new QueryPath("StandardLong1"), getBytes(0), ByteBufferUtil.EMPTY_BYTE_BUFFER, false, 1000); Collection<IColumn> columns = cf.getSortedColumns(); assertEquals(12, columns.size()); Iterator<IColumn> iter = columns.iterator(); IColumn column; for (int j = 0; j < 8; j++) { column = iter.next(); assert column.name().equals(getBytes(j)); } TreeSet<ByteBuffer> columnNames = new TreeSet<ByteBuffer>(LongType.instance); columnNames.add(getBytes(10)); columnNames.add(getBytes(0)); cf = cfStore.getColumnFamily(QueryFilter.getNamesFilter(Util.dk("900"), new QueryPath("StandardLong1"), columnNames)); assert "c".equals(new String(cf.getColumn(getBytes(0)).value().array())); assert "c".equals(new String(cf.getColumn(getBytes(10)).value().array())); }
public void testCompactions() throws IOException, ExecutionException, InterruptedException { CompactionManager.instance.disableAutoCompaction(); // this test does enough rows to force multiple block indexes to be used Table table = Table.open(TABLE1); ColumnFamilyStore store = table.getColumnFamilyStore("Standard1"); final int ROWS_PER_SSTABLE = 10; Set<DecoratedKey> inserted = new HashSet<DecoratedKey>(); for (int j = 0; j < (DatabaseDescriptor.getIndexInterval() * 3) / ROWS_PER_SSTABLE; j++) { for (int i = 0; i < ROWS_PER_SSTABLE; i++) { DecoratedKey key = Util.dk(String.valueOf(i % 2)); RowMutation rm = new RowMutation(TABLE1, key.key); rm.add(new QueryPath("Standard1", null, ByteBuffer.wrap(String.valueOf(i / 2).getBytes())), ByteBufferUtil.EMPTY_BYTE_BUFFER, j * ROWS_PER_SSTABLE + i); rm.apply(); inserted.add(key); } store.forceBlockingFlush(); assertEquals(inserted.toString(), inserted.size(), Util.getRangeSlice(store).size()); } while (true) { Future<Integer> ft = CompactionManager.instance.submitMinorIfNeeded(store); if (ft.get() == 0) break; } if (store.getSSTables().size() > 1) { CompactionManager.instance.performMajor(store); } assertEquals(inserted.size(), Util.getRangeSlice(store).size()); }
public void testCompactions() throws IOException, ExecutionException, InterruptedException { CompactionManager.instance.disableAutoCompaction(); // this test does enough rows to force multiple block indexes to be used Table table = Table.open(TABLE1); ColumnFamilyStore store = table.getColumnFamilyStore("Standard1"); final int ROWS_PER_SSTABLE = 10; Set<DecoratedKey> inserted = new HashSet<DecoratedKey>(); for (int j = 0; j < (DatabaseDescriptor.getIndexInterval() * 3) / ROWS_PER_SSTABLE; j++) { for (int i = 0; i < ROWS_PER_SSTABLE; i++) { DecoratedKey key = Util.dk(String.valueOf(i % 2)); RowMutation rm = new RowMutation(TABLE1, key.key); rm.add(new QueryPath("Standard1", null, ByteBufferUtil.bytes(String.valueOf(i / 2))), ByteBufferUtil.EMPTY_BYTE_BUFFER, j * ROWS_PER_SSTABLE + i); rm.apply(); inserted.add(key); } store.forceBlockingFlush(); assertEquals(inserted.toString(), inserted.size(), Util.getRangeSlice(store).size()); } while (true) { Future<Integer> ft = CompactionManager.instance.submitMinorIfNeeded(store); if (ft.get() == 0) break; } if (store.getSSTables().size() > 1) { CompactionManager.instance.performMajor(store); } assertEquals(inserted.size(), Util.getRangeSlice(store).size()); }
public void testManyColumns() throws IOException { ColumnFamily cf; TreeMap<String, String> map = new TreeMap<String, String>(); for (int i = 100; i < 1000; ++i) { map.put(Integer.toString(i), "Avinash Lakshman is a good man: " + i); } // write cf = ColumnFamily.create("Keyspace1", "Standard1"); DataOutputBuffer bufOut = new DataOutputBuffer(); for (String cName : map.navigableKeySet()) { cf.addColumn(column(cName, map.get(cName), 314)); } ColumnFamily.serializer().serialize(cf, bufOut); // verify ByteArrayInputStream bufIn = new ByteArrayInputStream(bufOut.getData(), 0, bufOut.getLength()); cf = ColumnFamily.serializer().deserialize(new DataInputStream(bufIn)); for (String cName : map.navigableKeySet()) { ByteBuffer val = cf.getColumn(ByteBuffer.wrap(cName.getBytes())).value(); assert new String(val.array(),val.position(),val.remaining()).equals(map.get(cName)); } assert cf.getColumnNames().size() == map.size(); }
public void testManyColumns() throws IOException { ColumnFamily cf; TreeMap<String, String> map = new TreeMap<String, String>(); for (int i = 100; i < 1000; ++i) { map.put(Integer.toString(i), "Avinash Lakshman is a good man: " + i); } // write cf = ColumnFamily.create("Keyspace1", "Standard1"); DataOutputBuffer bufOut = new DataOutputBuffer(); for (String cName : map.navigableKeySet()) { cf.addColumn(column(cName, map.get(cName), 314)); } ColumnFamily.serializer().serialize(cf, bufOut); // verify ByteArrayInputStream bufIn = new ByteArrayInputStream(bufOut.getData(), 0, bufOut.getLength()); cf = ColumnFamily.serializer().deserialize(new DataInputStream(bufIn)); for (String cName : map.navigableKeySet()) { ByteBuffer val = cf.getColumn(ByteBufferUtil.bytes(cName)).value(); assert new String(val.array(),val.position(),val.remaining()).equals(map.get(cName)); } assert cf.getColumnNames().size() == map.size(); }
public void testDecode() throws IOException {
    ByteBuffer bytes = ByteBuffer.wrap(new byte[]{(byte)0xff, (byte)0xfe});
    ByteBufferUtil.string(bytes, Charsets.UTF_8);
}
public void testDecode() throws IOException {
    ByteBuffer bytes = ByteBuffer.wrap(new byte[]{(byte)0xff, (byte)0xfe});
    ByteBufferUtil.string(bytes);
}
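The test feeds the bytes 0xFF 0xFE, which are not valid UTF-8, and the fix drops the explicit charset argument; presumably the helper decodes as UTF-8 by default and the test expects the decode to be rejected. A self-contained sketch of strict UTF-8 decoding that refuses those bytes (JDK CharsetDecoder, not Cassandra's helper):

import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
import java.nio.charset.CharsetDecoder;
import java.nio.charset.CodingErrorAction;
import java.nio.charset.StandardCharsets;

final class StrictUtf8DecodeSketch {
    /** Decodes strictly: malformed input raises CharacterCodingException. */
    static String decodeUtf8(ByteBuffer bytes) throws CharacterCodingException {
        CharsetDecoder decoder = StandardCharsets.UTF_8.newDecoder()
                .onMalformedInput(CodingErrorAction.REPORT)
                .onUnmappableCharacter(CodingErrorAction.REPORT);
        return decoder.decode(bytes.duplicate()).toString();
    }

    public static void main(String[] args) {
        ByteBuffer bad = ByteBuffer.wrap(new byte[]{(byte) 0xff, (byte) 0xfe});
        try {
            decodeUtf8(bad);
        } catch (CharacterCodingException expected) {
            System.out.println("rejected invalid UTF-8 as expected");
        }
    }
}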
public String asHex(String str) {
    return bytesToHex(ByteBuffer.wrap(str.getBytes()));
}
public String asHex(String str) {
    return bytesToHex(ByteBufferUtil.bytes(str));
}
public ByteBuffer fromString(String source) {
    return ByteBuffer.wrap(source.getBytes(Charsets.US_ASCII));
}
public ByteBuffer fromString(String source) {
    return ByteBufferUtil.bytes(source, Charsets.US_ASCII);
}
public void run(Cassandra.Client client) throws IOException { List<ByteBuffer> values = generateValues(); List<Column> columns = new ArrayList<Column>(); List<SuperColumn> superColumns = new ArrayList<SuperColumn>(); // format used for keys String format = "%0" + session.getTotalKeysLength() + "d"; for (int i = 0; i < session.getColumnsPerKey(); i++) { String columnName = ("C" + Integer.toString(i)); ByteBuffer columnValue = values.get(i % values.size()); columns.add(new Column(ByteBufferUtil.bytes(columnName), columnValue, System.currentTimeMillis())); } if (session.getColumnFamilyType() == ColumnFamilyType.Super) { // supers = [SuperColumn('S' + str(j), columns) for j in xrange(supers_per_key)] for (int i = 0; i < session.getSuperColumns(); i++) { String superColumnName = "S" + Integer.toString(i); superColumns.add(new SuperColumn(ByteBuffer.wrap(superColumnName.getBytes()), columns)); } } String rawKey = String.format(format, index); Map<ByteBuffer, Map<String, List<Mutation>>> record = new HashMap<ByteBuffer, Map<String, List<Mutation>>>(); record.put(ByteBufferUtil.bytes(rawKey), session.getColumnFamilyType() == ColumnFamilyType.Super ? getSuperColumnsMutationMap(superColumns) : getColumnsMutationMap(columns)); long start = System.currentTimeMillis(); boolean success = false; String exceptionMessage = null; for (int t = 0; t < session.getRetryTimes(); t++) { if (success) break; try { client.batch_mutate(record, session.getConsistencyLevel()); success = true; } catch (Exception e) { exceptionMessage = getExceptionMessage(e); success = false; } } if (!success) { error(String.format("Operation [%d] retried %d times - error inserting key %s %s%n", index, session.getRetryTimes(), rawKey, (exceptionMessage == null) ? "" : "(" + exceptionMessage + ")")); } session.operations.getAndIncrement(); session.keys.getAndIncrement(); session.latency.getAndAdd(System.currentTimeMillis() - start); }
public void run(Cassandra.Client client) throws IOException { List<ByteBuffer> values = generateValues(); List<Column> columns = new ArrayList<Column>(); List<SuperColumn> superColumns = new ArrayList<SuperColumn>(); // format used for keys String format = "%0" + session.getTotalKeysLength() + "d"; for (int i = 0; i < session.getColumnsPerKey(); i++) { String columnName = ("C" + Integer.toString(i)); ByteBuffer columnValue = values.get(i % values.size()); columns.add(new Column(ByteBufferUtil.bytes(columnName), columnValue, System.currentTimeMillis())); } if (session.getColumnFamilyType() == ColumnFamilyType.Super) { // supers = [SuperColumn('S' + str(j), columns) for j in xrange(supers_per_key)] for (int i = 0; i < session.getSuperColumns(); i++) { String superColumnName = "S" + Integer.toString(i); superColumns.add(new SuperColumn(ByteBufferUtil.bytes(superColumnName), columns)); } } String rawKey = String.format(format, index); Map<ByteBuffer, Map<String, List<Mutation>>> record = new HashMap<ByteBuffer, Map<String, List<Mutation>>>(); record.put(ByteBufferUtil.bytes(rawKey), session.getColumnFamilyType() == ColumnFamilyType.Super ? getSuperColumnsMutationMap(superColumns) : getColumnsMutationMap(columns)); long start = System.currentTimeMillis(); boolean success = false; String exceptionMessage = null; for (int t = 0; t < session.getRetryTimes(); t++) { if (success) break; try { client.batch_mutate(record, session.getConsistencyLevel()); success = true; } catch (Exception e) { exceptionMessage = getExceptionMessage(e); success = false; } } if (!success) { error(String.format("Operation [%d] retried %d times - error inserting key %s %s%n", index, session.getRetryTimes(), rawKey, (exceptionMessage == null) ? "" : "(" + exceptionMessage + ")")); } session.operations.getAndIncrement(); session.keys.getAndIncrement(); session.latency.getAndAdd(System.currentTimeMillis() - start); }
protected final Similarity similarity; PhraseScorer(Weight weight, PhraseQuery.PostingsAndFreq[] postings, Similarity similarity, byte[] norms) { super(weight); this.similarity = similarity; this.norms = norms; this.value = weight.getValue(); // convert tps to a list of phrase positions. // note: phrase-position differs from term-position in that its position // reflects the phrase offset: pp.pos = tp.pos - offset. // this allows to easily identify a matching (exact) phrase // when all PhrasePositions have exactly the same position. for (int i = 0; i < postings.length; i++) { PhrasePositions pp = new PhrasePositions(postings[i].postings, postings[i].position); if (last != null) { // add next to end of list last.next = pp; } else { first = pp; } last = pp; } pq = new PhraseQueue(postings.length); // construct empty pq first.doc = -1; }
protected final Similarity similarity; PhraseScorer(Weight weight, PhraseQuery.PostingsAndFreq[] postings, Similarity similarity, byte[] norms) { super(weight); this.similarity = similarity; this.norms = norms; this.value = weight.getValue(); // convert tps to a list of phrase positions. // note: phrase-position differs from term-position in that its position // reflects the phrase offset: pp.pos = tp.pos - offset. // this allows to easily identify a matching (exact) phrase // when all PhrasePositions have exactly the same position. for (int i = 0; i < postings.length; i++) { PhrasePositions pp = new PhrasePositions(postings[i].postings, postings[i].position, i); if (last != null) { // add next to end of list last.next = pp; } else { first = pp; } last = pp; } pq = new PhraseQueue(postings.length); // construct empty pq first.doc = -1; }
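The PhraseScorer fix above passes the loop index as an extra constructor argument to PhrasePositions; presumably it acts as an ordinal so that entries with equal positions can still be ordered deterministically in the PhraseQueue. A small JDK-only sketch of that tie-breaking idea; the Entry type and its field names are illustrative, not the Lucene types:

import java.util.Comparator;
import java.util.PriorityQueue;

public class OrdinalTieBreakSketch {
    // Hypothetical stand-in for a phrase position: a primary key plus a construction-order ordinal.
    static final class Entry {
        final int position;
        final int ord;
        Entry(int position, int ord) { this.position = position; this.ord = ord; }
        @Override public String toString() { return "position=" + position + " ord=" + ord; }
    }

    public static void main(String[] args) {
        PriorityQueue<Entry> pq = new PriorityQueue<Entry>(3, new Comparator<Entry>() {
            public int compare(Entry a, Entry b) {
                if (a.position != b.position) return Integer.compare(a.position, b.position);
                return Integer.compare(a.ord, b.ord); // the ordinal breaks ties, so ordering is stable
            }
        });
        pq.add(new Entry(5, 2));
        pq.add(new Entry(5, 0));
        pq.add(new Entry(3, 1));
        while (!pq.isEmpty()) {
            System.out.println(pq.poll());
        }
    }
}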
public void testRemoveColumnFamily() throws IOException, ExecutionException, InterruptedException { Table table = Table.open("Keyspace1"); ColumnFamilyStore store = table.getColumnFamilyStore("Standard1"); RowMutation rm; DecoratedKey dk = Util.dk("key1"); // add data rm = new RowMutation("Keyspace1", dk.key); rm.add(new QueryPath("Standard1", null, "Column1".getBytes()), "asdf".getBytes(), new TimestampClock(0)); rm.apply(); // remove rm = new RowMutation("Keyspace1", dk.key); rm.delete(new QueryPath("Standard1"), new TimestampClock(1)); rm.apply(); ColumnFamily retrieved = store.getColumnFamily(QueryFilter.getIdentityFilter(dk, new QueryPath("Standard1", null, "Column1".getBytes()))); assert retrieved.isMarkedForDelete(); assertNull(retrieved.getColumn("Column1".getBytes())); assertNull(ColumnFamilyStore.removeDeleted(retrieved, Integer.MAX_VALUE)); }
public void testRemoveColumnFamily() throws IOException, ExecutionException, InterruptedException { Table table = Table.open("Keyspace1"); ColumnFamilyStore store = table.getColumnFamilyStore("Standard1"); RowMutation rm; DecoratedKey dk = Util.dk("key1"); // add data rm = new RowMutation("Keyspace1", dk.key); rm.add(new QueryPath("Standard1", null, "Column1".getBytes()), "asdf".getBytes(), new TimestampClock(0)); rm.apply(); // remove rm = new RowMutation("Keyspace1", dk.key); rm.delete(new QueryPath("Standard1"), new TimestampClock(1)); rm.apply(); ColumnFamily retrieved = store.getColumnFamily(QueryFilter.getIdentityFilter(dk, new QueryPath("Standard1", null, "Column1".getBytes()))); assert retrieved.isMarkedForDelete(); assertNull(retrieved.getColumn("Column1".getBytes())); assertNull(Util.cloneAndRemoveDeleted(retrieved, Integer.MAX_VALUE)); }
public void testRemoveSubColumn() throws IOException, ExecutionException, InterruptedException { Table table = Table.open("Keyspace1"); ColumnFamilyStore store = table.getColumnFamilyStore("Super1"); RowMutation rm; DecoratedKey dk = Util.dk("key1"); // add data rm = new RowMutation("Keyspace1", dk.key); Util.addMutation(rm, "Super1", "SC1", 1, "asdf", new TimestampClock(0)); rm.apply(); store.forceBlockingFlush(); // remove rm = new RowMutation("Keyspace1", dk.key); rm.delete(new QueryPath("Super1", "SC1".getBytes(), getBytes(1)), new TimestampClock(1)); rm.apply(); ColumnFamily retrieved = store.getColumnFamily(QueryFilter.getIdentityFilter(dk, new QueryPath("Super1", "SC1".getBytes()))); assert retrieved.getColumn("SC1".getBytes()).getSubColumn(getBytes(1)).isMarkedForDelete(); assertNull(ColumnFamilyStore.removeDeleted(retrieved, Integer.MAX_VALUE)); }
public void testRemoveSubColumn() throws IOException, ExecutionException, InterruptedException { Table table = Table.open("Keyspace1"); ColumnFamilyStore store = table.getColumnFamilyStore("Super1"); RowMutation rm; DecoratedKey dk = Util.dk("key1"); // add data rm = new RowMutation("Keyspace1", dk.key); Util.addMutation(rm, "Super1", "SC1", 1, "asdf", new TimestampClock(0)); rm.apply(); store.forceBlockingFlush(); // remove rm = new RowMutation("Keyspace1", dk.key); rm.delete(new QueryPath("Super1", "SC1".getBytes(), getBytes(1)), new TimestampClock(1)); rm.apply(); ColumnFamily retrieved = store.getColumnFamily(QueryFilter.getIdentityFilter(dk, new QueryPath("Super1", "SC1".getBytes()))); assert retrieved.getColumn("SC1".getBytes()).getSubColumn(getBytes(1)).isMarkedForDelete(); assertNull(Util.cloneAndRemoveDeleted(retrieved, Integer.MAX_VALUE)); }
public void testRemoveColumnFamilyWithFlush1() throws IOException, ExecutionException, InterruptedException { Table table = Table.open("Keyspace1"); ColumnFamilyStore store = table.getColumnFamilyStore("Standard1"); RowMutation rm; DecoratedKey dk = Util.dk("key1"); // add data rm = new RowMutation("Keyspace1", dk.key); rm.add(new QueryPath("Standard1", null, "Column1".getBytes()), "asdf".getBytes(), new TimestampClock(0)); rm.add(new QueryPath("Standard1", null, "Column2".getBytes()), "asdf".getBytes(), new TimestampClock(0)); rm.apply(); store.forceBlockingFlush(); // remove rm = new RowMutation("Keyspace1", dk.key); rm.delete(new QueryPath("Standard1"), new TimestampClock(1)); rm.apply(); ColumnFamily retrieved = store.getColumnFamily(QueryFilter.getIdentityFilter(dk, new QueryPath("Standard1"))); assert retrieved.isMarkedForDelete(); assertNull(retrieved.getColumn("Column1".getBytes())); assertNull(ColumnFamilyStore.removeDeleted(retrieved, Integer.MAX_VALUE)); }
public void testRemoveColumnFamilyWithFlush1() throws IOException, ExecutionException, InterruptedException { Table table = Table.open("Keyspace1"); ColumnFamilyStore store = table.getColumnFamilyStore("Standard1"); RowMutation rm; DecoratedKey dk = Util.dk("key1"); // add data rm = new RowMutation("Keyspace1", dk.key); rm.add(new QueryPath("Standard1", null, "Column1".getBytes()), "asdf".getBytes(), new TimestampClock(0)); rm.add(new QueryPath("Standard1", null, "Column2".getBytes()), "asdf".getBytes(), new TimestampClock(0)); rm.apply(); store.forceBlockingFlush(); // remove rm = new RowMutation("Keyspace1", dk.key); rm.delete(new QueryPath("Standard1"), new TimestampClock(1)); rm.apply(); ColumnFamily retrieved = store.getColumnFamily(QueryFilter.getIdentityFilter(dk, new QueryPath("Standard1"))); assert retrieved.isMarkedForDelete(); assertNull(retrieved.getColumn("Column1".getBytes())); assertNull(Util.cloneAndRemoveDeleted(retrieved, Integer.MAX_VALUE)); }
public void testRemoveColumnFamilyWithFlush2() throws IOException, ExecutionException, InterruptedException { Table table = Table.open("Keyspace1"); ColumnFamilyStore store = table.getColumnFamilyStore("Standard1"); RowMutation rm; DecoratedKey dk = Util.dk("key1"); // add data rm = new RowMutation("Keyspace1", dk.key); rm.add(new QueryPath("Standard1", null, "Column1".getBytes()), "asdf".getBytes(), new TimestampClock(0)); rm.apply(); // remove rm = new RowMutation("Keyspace1", dk.key); rm.delete(new QueryPath("Standard1"), new TimestampClock(1)); rm.apply(); store.forceBlockingFlush(); ColumnFamily retrieved = store.getColumnFamily(QueryFilter.getIdentityFilter(dk, new QueryPath("Standard1", null, "Column1".getBytes()))); assert retrieved.isMarkedForDelete(); assertNull(retrieved.getColumn("Column1".getBytes())); assertNull(ColumnFamilyStore.removeDeleted(retrieved, Integer.MAX_VALUE)); }
public void testRemoveColumnFamilyWithFlush2() throws IOException, ExecutionException, InterruptedException { Table table = Table.open("Keyspace1"); ColumnFamilyStore store = table.getColumnFamilyStore("Standard1"); RowMutation rm; DecoratedKey dk = Util.dk("key1"); // add data rm = new RowMutation("Keyspace1", dk.key); rm.add(new QueryPath("Standard1", null, "Column1".getBytes()), "asdf".getBytes(), new TimestampClock(0)); rm.apply(); // remove rm = new RowMutation("Keyspace1", dk.key); rm.delete(new QueryPath("Standard1"), new TimestampClock(1)); rm.apply(); store.forceBlockingFlush(); ColumnFamily retrieved = store.getColumnFamily(QueryFilter.getIdentityFilter(dk, new QueryPath("Standard1", null, "Column1".getBytes()))); assert retrieved.isMarkedForDelete(); assertNull(retrieved.getColumn("Column1".getBytes())); assertNull(Util.cloneAndRemoveDeleted(retrieved, Integer.MAX_VALUE)); }
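The four test pairs above make the same substitution: the assertion that used ColumnFamilyStore.removeDeleted(retrieved, Integer.MAX_VALUE) now goes through Util.cloneAndRemoveDeleted(retrieved, Integer.MAX_VALUE). One plausible reading, judging only from the helper's name, is that the test should filter a copy so the ColumnFamily it just retrieved is not mutated in place. A JDK-only sketch of that clone-then-filter pattern; the types and the "deleted means negative" convention below are purely illustrative:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

public class CloneThenFilterSketch {
    // Hypothetical helper: copy the input, drop "deleted" entries (negative here) from the copy,
    // and return null when nothing survives -- the original list is never touched.
    static List<Integer> cloneAndRemoveDeleted(List<Integer> columns) {
        List<Integer> copy = new ArrayList<Integer>(columns);
        for (Iterator<Integer> it = copy.iterator(); it.hasNext();) {
            if (it.next() < 0) it.remove();
        }
        return copy.isEmpty() ? null : copy;
    }

    public static void main(String[] args) {
        List<Integer> retrieved = new ArrayList<Integer>(Arrays.asList(-1, -2));
        System.out.println(cloneAndRemoveDeleted(retrieved)); // null: every entry counted as deleted
        System.out.println(retrieved);                        // [-1, -2]: the original is unchanged
    }
}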
public boolean isEmpty() { boolean cfIrrelevant = ColumnFamilyStore.removeDeleted(emptyColumnFamily, gcBefore) == null; return cfIrrelevant && columnCount == 0; }
public boolean isEmpty() { boolean cfIrrelevant = ColumnFamilyStore.removeDeletedCF(emptyColumnFamily, gcBefore) == null; return cfIrrelevant && columnCount == 0; }
public EliasFanoEncoder(long numValues, long upperBound, long indexInterval) { if (numValues < 0L) { throw new IllegalArgumentException("numValues should not be negative: " + numValues); } this.numValues = numValues; if ((numValues > 0L) && (upperBound < 0L)) { throw new IllegalArgumentException("upperBound should not be negative: " + upperBound + " when numValues > 0"); } this.upperBound = numValues > 0 ? upperBound : -1L; // if there is no value, -1 is the best upper bound int nLowBits = 0; if (this.numValues > 0) { // nLowBits = max(0; floor(2log(upperBound/numValues))) long lowBitsFac = this.upperBound / this.numValues; if (lowBitsFac > 0) { nLowBits = 63 - Long.numberOfLeadingZeros(lowBitsFac); // see Long.numberOfLeadingZeros javadocs } } this.numLowBits = nLowBits; this.lowerBitsMask = Long.MAX_VALUE >>> (Long.SIZE - 1 - this.numLowBits); long numLongsForLowBits = numLongsForBits(numValues * numLowBits); if (numLongsForLowBits > Integer.MAX_VALUE) { throw new IllegalArgumentException("numLongsForLowBits too large to index a long array: " + numLongsForLowBits); } this.lowerLongs = new long[(int) numLongsForLowBits]; long numHighBitsClear = ((this.upperBound > 0) ? this.upperBound : 0) >>> this.numLowBits; assert numHighBitsClear <= (2 * this.numValues); long numHighBitsSet = this.numValues; long numLongsForHighBits = numLongsForBits(numHighBitsClear + numHighBitsSet); if (numLongsForHighBits > Integer.MAX_VALUE) { throw new IllegalArgumentException("numLongsForHighBits too large to index a long array: " + numLongsForHighBits); } this.upperLongs = new long[(int) numLongsForHighBits]; if (indexInterval < 2) { throw new IllegalArgumentException("indexInterval should at least 2: " + indexInterval); } // For the index: long maxHighValue = upperBound >>> this.numLowBits; long nIndexEntries = maxHighValue / indexInterval; // no zero value index entry this.numIndexEntries = (nIndexEntries >= 0) ? nIndexEntries : 0; long maxIndexEntry = maxHighValue + numValues - 1; // clear upper bits, set upper bits, start at zero this.nIndexEntryBits = (maxIndexEntry <= 0) ? 0 : (64 - Long.numberOfLeadingZeros(maxIndexEntry - 1)); long numLongsForIndexBits = numLongsForBits(numIndexEntries * nIndexEntryBits); if (numLongsForIndexBits > Integer.MAX_VALUE) { throw new IllegalArgumentException("numLongsForIndexBits too large to index a long array: " + numLongsForIndexBits); } this.upperZeroBitPositionIndex = new long[(int) numLongsForIndexBits]; this.currentEntryIndex = 0; this.indexInterval = indexInterval; }
public EliasFanoEncoder(long numValues, long upperBound, long indexInterval) { if (numValues < 0L) { throw new IllegalArgumentException("numValues should not be negative: " + numValues); } this.numValues = numValues; if ((numValues > 0L) && (upperBound < 0L)) { throw new IllegalArgumentException("upperBound should not be negative: " + upperBound + " when numValues > 0"); } this.upperBound = numValues > 0 ? upperBound : -1L; // if there is no value, -1 is the best upper bound int nLowBits = 0; if (this.numValues > 0) { // nLowBits = max(0; floor(2log(upperBound/numValues))) long lowBitsFac = this.upperBound / this.numValues; if (lowBitsFac > 0) { nLowBits = 63 - Long.numberOfLeadingZeros(lowBitsFac); // see Long.numberOfLeadingZeros javadocs } } this.numLowBits = nLowBits; this.lowerBitsMask = Long.MAX_VALUE >>> (Long.SIZE - 1 - this.numLowBits); long numLongsForLowBits = numLongsForBits(numValues * numLowBits); if (numLongsForLowBits > Integer.MAX_VALUE) { throw new IllegalArgumentException("numLongsForLowBits too large to index a long array: " + numLongsForLowBits); } this.lowerLongs = new long[(int) numLongsForLowBits]; long numHighBitsClear = ((this.upperBound > 0) ? this.upperBound : 0) >>> this.numLowBits; assert numHighBitsClear <= (2 * this.numValues); long numHighBitsSet = this.numValues; long numLongsForHighBits = numLongsForBits(numHighBitsClear + numHighBitsSet); if (numLongsForHighBits > Integer.MAX_VALUE) { throw new IllegalArgumentException("numLongsForHighBits too large to index a long array: " + numLongsForHighBits); } this.upperLongs = new long[(int) numLongsForHighBits]; if (indexInterval < 2) { throw new IllegalArgumentException("indexInterval should at least 2: " + indexInterval); } // For the index: long maxHighValue = upperBound >>> this.numLowBits; long nIndexEntries = maxHighValue / indexInterval; // no zero value index entry this.numIndexEntries = (nIndexEntries >= 0) ? nIndexEntries : 0; long maxIndexEntry = maxHighValue + numValues - 1; // clear upper bits, set upper bits, start at zero this.nIndexEntryBits = (maxIndexEntry <= 0) ? 0 : (64 - Long.numberOfLeadingZeros(maxIndexEntry)); long numLongsForIndexBits = numLongsForBits(numIndexEntries * nIndexEntryBits); if (numLongsForIndexBits > Integer.MAX_VALUE) { throw new IllegalArgumentException("numLongsForIndexBits too large to index a long array: " + numLongsForIndexBits); } this.upperZeroBitPositionIndex = new long[(int) numLongsForIndexBits]; this.currentEntryIndex = 0; this.indexInterval = indexInterval; }
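The only change in the EliasFanoEncoder pair above is the argument used to size nIndexEntryBits: 64 - Long.numberOfLeadingZeros(maxIndexEntry) instead of 64 - Long.numberOfLeadingZeros(maxIndexEntry - 1). A short JDK-only sketch of the off-by-one: the bit width needed for a positive value v is 64 - numberOfLeadingZeros(v), and computing it from v - 1 under-counts by one bit exactly when v is a power of two.

public class BitWidthSketch {
    // Bits needed to represent a positive value v.
    static int bitsFor(long v) {
        return 64 - Long.numberOfLeadingZeros(v);
    }

    // The buggy sizing: derived from v - 1 instead of v.
    static int bitsForMinusOne(long v) {
        return 64 - Long.numberOfLeadingZeros(v - 1);
    }

    public static void main(String[] args) {
        long v = 8; // binary 1000, a power of two
        System.out.println(bitsFor(v));         // 4 -- can hold values up to 15, so 8 fits
        System.out.println(bitsForMinusOne(v)); // 3 -- can only hold values up to 7, so 8 does not fit
    }
}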
private boolean nodesEqual(Builder.UnCompiledNode<T> node, int address) throws IOException { fst.readFirstRealArc(address, scratchArc); if (scratchArc.bytesPerArc != 0 && node.numArcs != scratchArc.numArcs) { return false; } for(int arcUpto=0;arcUpto<node.numArcs;arcUpto++) { final Builder.Arc arc = node.arcs[arcUpto]; if (arc.label != scratchArc.label || !arc.output.equals(scratchArc.output) || ((Builder.CompiledNode) arc.target).address != scratchArc.target || !arc.nextFinalOutput.equals(scratchArc.nextFinalOutput) || arc.isFinal != scratchArc.isFinal()) { return false; } if (scratchArc.isLast()) { if (arcUpto == node.numArcs-1) { return true; } else { return false; } } fst.readNextRealArc(scratchArc); } return false; }
private boolean nodesEqual(Builder.UnCompiledNode<T> node, int address) throws IOException { fst.readFirstRealArc(address, scratchArc); if (scratchArc.bytesPerArc != 0 && node.numArcs != scratchArc.numArcs) { return false; } for(int arcUpto=0;arcUpto<node.numArcs;arcUpto++) { final Builder.Arc<T> arc = node.arcs[arcUpto]; if (arc.label != scratchArc.label || !arc.output.equals(scratchArc.output) || ((Builder.CompiledNode) arc.target).address != scratchArc.target || !arc.nextFinalOutput.equals(scratchArc.nextFinalOutput) || arc.isFinal != scratchArc.isFinal()) { return false; } if (scratchArc.isLast()) { if (arcUpto == node.numArcs-1) { return true; } else { return false; } } fst.readNextRealArc(scratchArc); } return false; }
public static final String getJavaExecutableName() { String vmname = getSystemProperty("com.ibm.oti.vm.exe"); if (vmname == null) { vmname = getSystemProperty("java.vm.name"); // Sun phoneME if ("CVM".equals(vmname)) { vmname = getSystemProperty("java.home") + File.separator + "bin" + File.separator + "cvm"; } else { vmname = getSystemProperty("java.home") + File.separator + "bin" + File.separator + "java"; } } // derby-5490. workaround problem if executable name contains spaces if ( vmname.contains( " " ) ) { if ( getSystemProperty( "os.name" ).equals( "Mac OS X" ) ) { vmname = "java"; } } return vmname; }
public static final String getJavaExecutableName() { String vmname = getSystemProperty("com.ibm.oti.vm.exe"); if (vmname == null) { vmname = getSystemProperty("java.vm.name"); // Sun phoneME if ("CVM".equals(vmname)) { vmname = getSystemProperty("java.home") + File.separator + "bin" + File.separator + "cvm"; } else { vmname = getSystemProperty("java.home") + File.separator + "bin" + File.separator + "java"; } } // derby-5490. workaround problem if executable name contains spaces if ( vmname.indexOf( " " ) >= 0 ) { if ( getSystemProperty( "os.name" ).equals( "Mac OS X" ) ) { vmname = "java"; } } return vmname; }
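The Derby pair above replaces vmname.contains(" ") with vmname.indexOf(" ") >= 0. The two checks return the same result; the likely motivation (an assumption here, beyond what the DERBY-5490 comment states) is that String.contains(CharSequence) only exists from Java 5 on, while indexOf also works on the older class libraries Derby still targeted. A tiny sketch of the equivalence:

public class ContainsSpaceSketch {
    // indexOf-based test: same result as s.contains(" "), but uses only pre-Java-5 String methods.
    static boolean containsSpace(String s) {
        return s.indexOf(" ") >= 0;
    }

    public static void main(String[] args) {
        System.out.println(containsSpace("/Applications/My JVM/bin/java")); // true
        System.out.println(containsSpace("/usr/bin/java"));                 // false
    }
}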