buggy_function | fixed_function
---|---
public static synchronized void printException(String where, Exception e) {
if (e instanceof SQLException) {
SQLException se = (SQLException) e;
if (se.getSQLState() != null) { // SQLSTATE is NULL for a
if (se.getSQLState().equals("40001"))
System.out.println("deadlocked detected");
if (se.getSQLState().equals("40XL1"))
System.out.println(" lock timeout exception");
if (se.getSQLState().equals("23500"))
System.out.println(" duplicate key violation");
}
if (se.getNextException() != null) {
String m = se.getNextException().getSQLState();
System.out.println(se.getNextException().getMessage()
+ " SQLSTATE: " + m);
}
}
if (e.getMessage().equals(null)) {
System.out.println("NULL error message detected");
System.out.println("Here is the NULL exection - " + e.toString());
System.out.println("Stack trace of the NULL exception - ");
e.printStackTrace(System.out);
}
System.out.println("At this point - " + where
+ ", exception thrown was : " + e.getMessage());
}
| public static synchronized void printException(String where, Exception e) {
if (e instanceof SQLException) {
SQLException se = (SQLException) e;
if (se.getSQLState() != null) { // SQLSTATE is NULL for a
if (se.getSQLState().equals("40001"))
System.out.println("deadlocked detected");
if (se.getSQLState().equals("40XL1"))
System.out.println(" lock timeout exception");
if (se.getSQLState().equals("23500"))
System.out.println(" duplicate key violation");
}
if (se.getNextException() != null) {
String m = se.getNextException().getSQLState();
System.out.println(se.getNextException().getMessage()
+ " SQLSTATE: " + m);
}
}
if (e.getMessage() == null) {
System.out.println("NULL error message detected");
System.out.println("Here is the NULL exection - " + e.toString());
System.out.println("Stack trace of the NULL exception - ");
e.printStackTrace(System.out);
}
System.out.println("At this point - " + where
+ ", exception thrown was : " + e.getMessage());
}
|
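The key change in the pair above is the null test on the exception message: `e.getMessage().equals(null)` can never take the intended branch, because calling it on a null message throws a NullPointerException and a non-null message simply compares as not equal. A minimal standalone sketch of the difference (illustrative only, not taken from the patch itself):

```java
public class NullCheckDemo {
    public static void main(String[] args) {
        String message = null;
        // The fixed form: an identity test against null.
        System.out.println(message == null);        // true

        // The buggy form: equals(null) on a non-null string is always false ...
        System.out.println("boom".equals(null));    // false
        // ... and calling it on a null reference throws NullPointerException,
        // so the "NULL error message detected" branch could never run.
        // message.equals(null);                    // would throw NPE
    }
}
```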
public MonotonicAppendingLongBuffer(int initialPageCount, int pageSize) {
super(initialPageCount, pageSize);
averages = new float[pageSize];
}
| public MonotonicAppendingLongBuffer(int initialPageCount, int pageSize) {
super(initialPageCount, pageSize);
averages = new float[initialPageCount];
}
|
public void testBuild() throws IOException {
final String LF = System.getProperty("line.separator");
String input = "oneword" + LF + "twoword" + LF + "threeword";
PlainTextDictionary ptd = new PlainTextDictionary(new StringReader(input));
Directory ramDir = newDirectory();
SpellChecker spellChecker = new SpellChecker(ramDir);
spellChecker.indexDictionary(ptd);
String[] similar = spellChecker.suggestSimilar("treeword", 2);
assertEquals(2, similar.length);
assertEquals(similar[0], "threeword");
assertEquals(similar[1], "twoword");
spellChecker.close();
ramDir.close();
}
| public void testBuild() throws IOException {
final String LF = System.getProperty("line.separator");
String input = "oneword" + LF + "twoword" + LF + "threeword";
PlainTextDictionary ptd = new PlainTextDictionary(new StringReader(input));
Directory ramDir = newDirectory();
SpellChecker spellChecker = new SpellChecker(ramDir);
spellChecker.indexDictionary(ptd);
String[] similar = spellChecker.suggestSimilar("treeword", 2);
assertEquals(2, similar.length);
assertEquals(similar[0], "threeword");
assertEquals(similar[1], "oneword");
spellChecker.close();
ramDir.close();
}
|
public void testExtendedResultsCount() throws Exception {
assertJQ(req("qt",rh, SpellCheckComponent.COMPONENT_NAME, "true", SpellCheckComponent.SPELLCHECK_BUILD, "true", "q","bluo", SpellCheckComponent.SPELLCHECK_COUNT,"5", SpellCheckComponent.SPELLCHECK_EXTENDED_RESULTS,"false")
,"/spellcheck/suggestions/[0]=='bluo'"
,"/spellcheck/suggestions/[1]/numFound==5"
);
assertJQ(req("qt",rh, SpellCheckComponent.COMPONENT_NAME, "true", "q","bluo", SpellCheckComponent.SPELLCHECK_COUNT,"3", SpellCheckComponent.SPELLCHECK_EXTENDED_RESULTS,"true")
,"/spellcheck/suggestions/[1]/suggestion==[{'word':'blue','freq':1}, {'word':'blud','freq':1}, {'word':'boue','freq':1}]"
);
}
| public void testExtendedResultsCount() throws Exception {
assertJQ(req("qt",rh, SpellCheckComponent.COMPONENT_NAME, "true", SpellCheckComponent.SPELLCHECK_BUILD, "true", "q","bluo", SpellCheckComponent.SPELLCHECK_COUNT,"5", SpellCheckComponent.SPELLCHECK_EXTENDED_RESULTS,"false")
,"/spellcheck/suggestions/[0]=='bluo'"
,"/spellcheck/suggestions/[1]/numFound==5"
);
assertJQ(req("qt",rh, SpellCheckComponent.COMPONENT_NAME, "true", "q","bluo", SpellCheckComponent.SPELLCHECK_COUNT,"3", SpellCheckComponent.SPELLCHECK_EXTENDED_RESULTS,"true")
,"/spellcheck/suggestions/[1]/suggestion==[{'word':'blud','freq':1}, {'word':'blue','freq':1}, {'word':'blee','freq':1}]"
);
}
|
public String[] listAll() {
ensureOpen();
String[] res;
if (writer != null) {
res = writer.listAll();
} else {
res = entries.keySet().toArray(new String[entries.size()]);
// Add the segment name
String seg = fileName.substring(0, fileName.indexOf('.'));
for (int i = 0; i < res.length; i++) {
res[i] = seg + res[i];
}
}
return res;
}
| public String[] listAll() {
ensureOpen();
String[] res;
if (writer != null) {
res = writer.listAll();
} else {
res = entries.keySet().toArray(new String[entries.size()]);
// Add the segment name
String seg = IndexFileNames.parseSegmentName(fileName);
for (int i = 0; i < res.length; i++) {
res[i] = seg + res[i];
}
}
return res;
}
|
public static void prepareClass() throws Exception
{
LOCAL = FBUtilities.getLocalAddress();
tablename = "Keyspace4";
StorageService.instance.initServer();
// generate a fake endpoint for which we can spoof receiving/sending trees
REMOTE = InetAddress.getByName("127.0.0.2");
store = Table.open(tablename).getColumnFamilyStores().iterator().next();
cfname = store.columnFamily_;
}
| public static void prepareClass() throws Exception
{
LOCAL = FBUtilities.getLocalAddress();
tablename = "Keyspace5";
StorageService.instance.initServer();
// generate a fake endpoint for which we can spoof receiving/sending trees
REMOTE = InetAddress.getByName("127.0.0.2");
store = Table.open(tablename).getColumnFamilyStores().iterator().next();
cfname = store.columnFamily_;
}
|
public TestOrdValues(String name) {
super(name);
}
| public TestOrdValues(String name) {
super(name, false);
}
|
public StorageService()
{
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
try
{
mbs.registerMBean(this, new ObjectName("org.apache.cassandra.service:type=StorageService"));
}
catch (Exception e)
{
throw new RuntimeException(e);
}
bootstrapSet = Multimaps.synchronizedSetMultimap(HashMultimap.<InetAddress, String>create());
/* register the verb handlers */
MessagingService.instance.registerVerbHandlers(Verb.BINARY, new BinaryVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.MUTATION, new RowMutationVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.READ_REPAIR, new ReadRepairVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.READ, new ReadVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.RANGE_SLICE, new RangeSliceVerbHandler());
// see BootStrapper for a summary of how the bootstrap verbs interact
MessagingService.instance.registerVerbHandlers(Verb.BOOTSTRAP_TOKEN, new BootStrapper.BootstrapTokenVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.STREAM_REQUEST, new StreamRequestVerbHandler() );
MessagingService.instance.registerVerbHandlers(Verb.STREAM_INITIATE, new StreamInitiateVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.STREAM_INITIATE_DONE, new StreamInitiateDoneVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.STREAM_FINISHED, new StreamFinishedVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.READ_RESPONSE, new ResponseVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.TREE_REQUEST, new TreeRequestVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.TREE_RESPONSE, new AntiEntropyService.TreeResponseVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.JOIN, new GossiperJoinVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.GOSSIP_DIGEST_SYN, new GossipDigestSynVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.GOSSIP_DIGEST_ACK, new GossipDigestAckVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.GOSSIP_DIGEST_ACK2, new GossipDigestAck2VerbHandler());
replicationStrategies = new HashMap<String, AbstractReplicationStrategy>();
for (String table : DatabaseDescriptor.getTables())
{
AbstractReplicationStrategy strat = getReplicationStrategy(tokenMetadata_, table);
replicationStrategies.put(table, strat);
}
replicationStrategies = Collections.unmodifiableMap(replicationStrategies);
// spin up the streaming serivice so it is available for jmx tools.
if (StreamingService.instance == null)
throw new RuntimeException("Streaming service is unavailable.");
}
| public StorageService()
{
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
try
{
mbs.registerMBean(this, new ObjectName("org.apache.cassandra.service:type=StorageService"));
}
catch (Exception e)
{
throw new RuntimeException(e);
}
bootstrapSet = Multimaps.synchronizedSetMultimap(HashMultimap.<InetAddress, String>create());
/* register the verb handlers */
MessagingService.instance.registerVerbHandlers(Verb.BINARY, new BinaryVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.MUTATION, new RowMutationVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.READ_REPAIR, new ReadRepairVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.READ, new ReadVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.RANGE_SLICE, new RangeSliceVerbHandler());
// see BootStrapper for a summary of how the bootstrap verbs interact
MessagingService.instance.registerVerbHandlers(Verb.BOOTSTRAP_TOKEN, new BootStrapper.BootstrapTokenVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.STREAM_REQUEST, new StreamRequestVerbHandler() );
MessagingService.instance.registerVerbHandlers(Verb.STREAM_INITIATE, new StreamInitiateVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.STREAM_INITIATE_DONE, new StreamInitiateDoneVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.STREAM_FINISHED, new StreamFinishedVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.READ_RESPONSE, new ResponseVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.TREE_REQUEST, new TreeRequestVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.TREE_RESPONSE, new AntiEntropyService.TreeResponseVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.JOIN, new GossiperJoinVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.GOSSIP_DIGEST_SYN, new GossipDigestSynVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.GOSSIP_DIGEST_ACK, new GossipDigestAckVerbHandler());
MessagingService.instance.registerVerbHandlers(Verb.GOSSIP_DIGEST_ACK2, new GossipDigestAck2VerbHandler());
replicationStrategies = new HashMap<String, AbstractReplicationStrategy>();
for (String table : DatabaseDescriptor.getNonSystemTables())
{
AbstractReplicationStrategy strat = getReplicationStrategy(tokenMetadata_, table);
replicationStrategies.put(table, strat);
}
replicationStrategies = Collections.unmodifiableMap(replicationStrategies);
// spin up the streaming serivice so it is available for jmx tools.
if (StreamingService.instance == null)
throw new RuntimeException("Streaming service is unavailable.");
}
|
public void testSearch() throws Exception {
Query query = QueryParser.parse("test", "contents", analyzer);
Hits hits = searcher.search(query);
assertEquals("Find document(s)", 2, hits.length());
}
| public void testSearch() throws Exception {
Query query = new QueryParser("contents",analyzer).parse("test");
Hits hits = searcher.search(query);
assertEquals("Find document(s)", 2, hits.length());
}
|
public void setScorer(Scorer scorer) {
super.setScorer(scorer);
// TODO: might be cleaner to lazy-init 'source' and set scorer after?
assert readerContext != null;
try {
Map<String,Object> context = new HashMap<String,Object>();
assert scorer != null;
context.put("scorer", new ScoreFunctionValues(scorer));
scores = source.getValues(context, readerContext);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
| public void setScorer(Scorer scorer) {
super.setScorer(scorer);
// TODO: might be cleaner to lazy-init 'source' and set scorer after?
assert readerContext != null;
try {
Map<String,Object> context = new HashMap<String,Object>();
assert scorer != null;
context.put("scorer", scorer);
scores = source.getValues(context, readerContext);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
|
public ConcurrentUpdateSolrServer(String solrServerUrl,
HttpClient client, int queueSize, int threadCount) {
this(solrServerUrl, null, queueSize, threadCount, Executors.newCachedThreadPool(
new SolrjNamedThreadFactory("concurrentUpdateScheduler")));
shutdownExecutor = true;
}
| public ConcurrentUpdateSolrServer(String solrServerUrl,
HttpClient client, int queueSize, int threadCount) {
this(solrServerUrl, client, queueSize, threadCount, Executors.newCachedThreadPool(
new SolrjNamedThreadFactory("concurrentUpdateScheduler")));
shutdownExecutor = true;
}
|
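The ConcurrentUpdateSolrServer pair above is an instance of a delegating constructor silently dropping one of its arguments: the buggy version forwards `null` where the caller's `HttpClient` belongs, so the supplied client is never used. A generic sketch of the pattern (the `Widget` class is hypothetical, not from Solr):

```java
// Hypothetical example of the dropped-argument bug fixed above.
class Widget {
    private final String name;
    private final int capacity;

    // Delegating constructor: every accepted parameter must be forwarded.
    Widget(String name) {
        this(name, 16);   // the buggy pattern passes null or a constant here instead of 'name'
    }

    Widget(String name, int capacity) {
        this.name = name;
        this.capacity = capacity;
    }

    public static void main(String[] args) {
        System.out.println(new Widget("cache").name); // prints "cache": the argument survives delegation
    }
}
```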
public Token getBootstrapToken()
{
Range range = getLocalPrimaryRange();
List<DecoratedKey> keys = new ArrayList<DecoratedKey>();
for (ColumnFamilyStore cfs : ColumnFamilyStore.all())
{
for (IndexSummary.KeyPosition info: cfs.allIndexPositions())
{
if (range.contains(info.key.token))
keys.add(info.key);
}
}
FBUtilities.sortSampledKeys(keys, range);
if (keys.size() < 3)
return partitioner_.getRandomToken();
else
return keys.get(keys.size() / 2).token;
}
| public Token getBootstrapToken()
{
Range range = getLocalPrimaryRange();
List<DecoratedKey> keys = new ArrayList<DecoratedKey>();
for (ColumnFamilyStore cfs : ColumnFamilyStore.all())
{
for (IndexSummary.KeyPosition info: cfs.allIndexPositions())
{
if (range.contains(info.key.token))
keys.add(info.key);
}
}
FBUtilities.sortSampledKeys(keys, range);
if (keys.size() < 3)
return partitioner_.midpoint(range.left, range.right);
else
return keys.get(keys.size() / 2).token;
}
|
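In the getBootstrapToken pair above, the fallback for fewer than three sampled keys changes from a completely random token, which need not lie in the node's local primary range, to the midpoint of that range, which by construction does. A simplified numeric analogue (the token values are assumptions, not Cassandra's actual token space, and real ranges can wrap around the ring):

```java
import java.math.BigInteger;

public class MidpointDemo {
    public static void main(String[] args) {
        BigInteger left = new BigInteger("1000");
        BigInteger right = new BigInteger("2000");
        // (left + right) / 2 always falls inside a non-wrapping (left, right] range,
        // whereas a uniformly random token over the whole ring usually does not.
        BigInteger midpoint = left.add(right).shiftRight(1);
        System.out.println(midpoint); // 1500
    }
}
```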
public void setText(CharacterIterator newText) {
start = newText.getBeginIndex();
end = newText.getEndIndex();
text = newText;
current = newText.getIndex();
}
| public void setText(CharacterIterator newText) {
start = newText.getBeginIndex();
end = newText.getEndIndex();
text = newText;
current = start;
}
|
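In the setText pair above, the fix makes the scanner restart from the text's begin index rather than from wherever the caller's CharacterIterator happened to be positioned when it was handed over. A small standalone illustration of why those two values can differ:

```java
import java.text.CharacterIterator;
import java.text.StringCharacterIterator;

public class SetTextDemo {
    public static void main(String[] args) {
        CharacterIterator it = new StringCharacterIterator("hello world");
        it.setIndex(6);                          // cursor parked mid-string by the caller
        System.out.println(it.getIndex());       // 6 -> copying this would skip "hello "
        System.out.println(it.getBeginIndex());  // 0 -> what the fixed code starts from
    }
}
```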
public void testUpdateDelteSlices() {
DocumentsWriterDeleteQueue queue = new DocumentsWriterDeleteQueue();
final int size = 200 + random().nextInt(500) * RANDOM_MULTIPLIER;
Integer[] ids = new Integer[size];
for (int i = 0; i < ids.length; i++) {
ids[i] = random().nextInt();
}
DeleteSlice slice1 = queue.newSlice();
DeleteSlice slice2 = queue.newSlice();
BufferedDeletes bd1 = new BufferedDeletes();
BufferedDeletes bd2 = new BufferedDeletes();
int last1 = 0;
int last2 = 0;
Set<Term> uniqueValues = new HashSet<Term>();
for (int j = 0; j < ids.length; j++) {
Integer i = ids[j];
// create an array here since we compare identity below against tailItem
Term[] term = new Term[] {new Term("id", i.toString())};
uniqueValues.add(term[0]);
queue.addDelete(term);
if (random().nextInt(20) == 0 || j == ids.length - 1) {
queue.updateSlice(slice1);
assertTrue(slice1.isTailItem(term));
slice1.apply(bd1, j);
assertAllBetween(last1, j, bd1, ids);
last1 = j + 1;
}
if (random().nextInt(10) == 5 || j == ids.length - 1) {
queue.updateSlice(slice2);
assertTrue(slice2.isTailItem(term));
slice2.apply(bd2, j);
assertAllBetween(last2, j, bd2, ids);
last2 = j + 1;
}
assertEquals(uniqueValues.size(), queue.numGlobalTermDeletes());
}
assertEquals(uniqueValues, bd1.terms.keySet());
assertEquals(uniqueValues, bd2.terms.keySet());
HashSet<Term> frozenSet = new HashSet<Term>();
for (Term t : queue.freezeGlobalBuffer(null).termsIterable()) {
BytesRef bytesRef = new BytesRef();
bytesRef.copyBytes(t.bytes);
frozenSet.add(new Term(t.field, bytesRef));
}
assertEquals(uniqueValues, frozenSet);
assertEquals("num deletes must be 0 after freeze", 0, queue
.numGlobalTermDeletes());
}
| public void testUpdateDelteSlices() {
DocumentsWriterDeleteQueue queue = new DocumentsWriterDeleteQueue();
final int size = 200 + random().nextInt(500) * RANDOM_MULTIPLIER;
Integer[] ids = new Integer[size];
for (int i = 0; i < ids.length; i++) {
ids[i] = random().nextInt();
}
DeleteSlice slice1 = queue.newSlice();
DeleteSlice slice2 = queue.newSlice();
BufferedDeletes bd1 = new BufferedDeletes();
BufferedDeletes bd2 = new BufferedDeletes();
int last1 = 0;
int last2 = 0;
Set<Term> uniqueValues = new HashSet<Term>();
for (int j = 0; j < ids.length; j++) {
Integer i = ids[j];
// create an array here since we compare identity below against tailItem
Term[] term = new Term[] {new Term("id", i.toString())};
uniqueValues.add(term[0]);
queue.addDelete(term);
if (random().nextInt(20) == 0 || j == ids.length - 1) {
queue.updateSlice(slice1);
assertTrue(slice1.isTailItem(term));
slice1.apply(bd1, j);
assertAllBetween(last1, j, bd1, ids);
last1 = j + 1;
}
if (random().nextInt(10) == 5 || j == ids.length - 1) {
queue.updateSlice(slice2);
assertTrue(slice2.isTailItem(term));
slice2.apply(bd2, j);
assertAllBetween(last2, j, bd2, ids);
last2 = j + 1;
}
assertEquals(j+1, queue.numGlobalTermDeletes());
}
assertEquals(uniqueValues, bd1.terms.keySet());
assertEquals(uniqueValues, bd2.terms.keySet());
HashSet<Term> frozenSet = new HashSet<Term>();
for (Term t : queue.freezeGlobalBuffer(null).termsIterable()) {
BytesRef bytesRef = new BytesRef();
bytesRef.copyBytes(t.bytes);
frozenSet.add(new Term(t.field, bytesRef));
}
assertEquals(uniqueValues, frozenSet);
assertEquals("num deletes must be 0 after freeze", 0, queue
.numGlobalTermDeletes());
}
|
public String toString()
{
return getFilename() + "/" + StringUtils.join(sections, ",") + "\n\t progress=" + progress + "/" + size + " - " + progress*100/size + "%";
}
| public String toString()
{
return getFilename() + " sections=" + sections.size() + " progress=" + progress + "/" + size + " - " + progress*100/size + "%";
}
|
public int run(String[] args) throws Exception {
/**
Option seqOpt = obuilder.withLongName("seqFile").withRequired(false).withArgument(
abuilder.withName("seqFile").withMinimum(1).withMaximum(1).create()).withDescription(
"The Sequence File containing the Vectors").withShortName("s").create();
Option dirOpt = obuilder.withLongName("seqDirectory").withRequired(false).withArgument(
abuilder.withName("seqDirectory").withMinimum(1).withMaximum(1).create())
.withDescription("The directory containing Sequence File of Vectors")
.withShortName("d").create();
*/
addInputOption();
addOutputOption();
addOption("useKey", "u", "If the Key is a vector than dump that instead", false);
addOption("printKey", "p", "Print out the key as well, delimited by tab (or the value if useKey is true", false);
addOption("dictionary", "d", "The dictionary file.", false);
addOption("dictionaryType", "dt", "The dictionary file type (text|seqfile)", false);
addOption("csv", "c", "Output the Vector as CSV. Otherwise it substitutes in the terms for vector cell entries",
false);
addOption("namesAsComments", "n", "If using CSV output, optionally add a comment line for each NamedVector "
+ "(if the vector is one) printing out the name", false);
addOption("nameOnly", "N", "Use the name as the value for each NamedVector (skip other vectors)", false);
addOption("sortVectors", "sort", "Sort output key/value pairs of the vector entries in abs magnitude "
+ "descending order", false);
addOption("quiet", "q", "Print only file contents", false);
addOption("sizeOnly", "sz", "Dump only the size of the vector", false);
addOption("numItems", "ni", "Output at most <n> vecors", false);
addOption("vectorSize", "vs", "Truncate vectors to <vs> length when dumping (most useful when in"
+ " conjunction with -sort", false);
addOption(buildOption("filter", "fi", "Only dump out those vectors whose name matches the filter."
+ " Multiple items may be specified by repeating the argument.", true, 1, Integer.MAX_VALUE, false, null));
if (parseArguments(args, false, true) == null) {
return -1;
}
Path[] pathArr;
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
Path input = getInputPath();
FileStatus fileStatus = fs.getFileStatus(input);
if (fileStatus.isDir()) {
pathArr = FileUtil.stat2Paths(fs.listStatus(input, new OutputFilesFilter()));
} else {
FileStatus[] inputPaths = fs.globStatus(input);
pathArr = new Path[inputPaths.length];
int i = 0;
for (FileStatus fstatus : inputPaths) {
pathArr[i++] = fstatus.getPath();
}
}
String dictionaryType = getOption("dictionaryType", "text");
boolean sortVectors = hasOption("sortVectors");
boolean quiet = hasOption("quiet");
if (!quiet) {
log.info("Sort? {}", sortVectors);
}
String[] dictionary = null;
if (hasOption("dictionary")) {
String dictFile = getOption("dictionary");
if ("text".equals(dictionaryType)) {
dictionary = VectorHelper.loadTermDictionary(new File(dictFile));
} else if ("sequencefile".equals(dictionaryType)) {
dictionary = VectorHelper.loadTermDictionary(conf, dictFile);
} else {
//TODO: support Lucene's FST as a dictionary type
throw new IOException("Invalid dictionary type: " + dictionaryType);
}
}
Set<String> filters;
if (hasOption("filter")) {
filters = Sets.newHashSet(getOptions("filter"));
} else {
filters = null;
}
boolean useCSV = hasOption("csv");
boolean sizeOnly = hasOption("sizeOnly");
boolean nameOnly = hasOption("nameOnly");
boolean namesAsComments = hasOption("namesAsComments");
boolean transposeKeyValue = hasOption("vectorAsKey");
Writer writer;
boolean shouldClose;
File output = getOutputFile();
if (output != null) {
shouldClose = true;
writer = Files.newWriter(output, Charsets.UTF_8);
} else {
shouldClose = false;
writer = new OutputStreamWriter(System.out, Charsets.UTF_8);
}
try {
boolean printKey = hasOption("printKey");
if (useCSV && dictionary != null) {
writer.write("#");
for (int j = 0; j < dictionary.length; j++) {
writer.write(dictionary[j]);
if (j < dictionary.length - 1) {
writer.write(',');
}
}
writer.write('\n');
}
Long numItems = null;
if (hasOption("numItems")) {
numItems = Long.parseLong(getOption("numItems"));
if (quiet) {
writer.append("#Max Items to dump: ").append(String.valueOf(numItems)).append('\n');
}
}
int maxIndexesPerVector = hasOption("vectorSize")
? Integer.parseInt(getOption("vectorSize"))
: Integer.MAX_VALUE;
long itemCount = 0;
int fileCount = 0;
for (Path path : pathArr) {
if (numItems != null && numItems <= itemCount) {
break;
}
if (quiet) {
log.info("Processing file '{}' ({}/{})", path, ++fileCount, pathArr.length);
}
SequenceFileIterable<Writable, Writable> iterable =
new SequenceFileIterable<Writable, Writable>(path, true, conf);
Iterator<Pair<Writable, Writable>> iterator = iterable.iterator();
long i = 0;
while (iterator.hasNext() && (numItems == null || itemCount < numItems)) {
Pair<Writable, Writable> record = iterator.next();
Writable keyWritable = record.getFirst();
Writable valueWritable = record.getSecond();
if (printKey) {
Writable notTheVectorWritable = transposeKeyValue ? valueWritable : keyWritable;
writer.write(notTheVectorWritable.toString());
writer.write('\t');
}
Vector vector;
try {
vector = ((VectorWritable)
(transposeKeyValue ? keyWritable : valueWritable)).get();
} catch (ClassCastException e) {
if ((transposeKeyValue ? keyWritable : valueWritable)
instanceof WeightedPropertyVectorWritable) {
vector =
((WeightedPropertyVectorWritable)
(transposeKeyValue ? keyWritable : valueWritable)).getVector();
} else {
throw e;
}
}
if (filters != null
&& vector instanceof NamedVector
&& !filters.contains(((NamedVector) vector).getName())) {
//we are filtering out this item, skip
continue;
}
if (sizeOnly) {
if (vector instanceof NamedVector) {
writer.write(((NamedVector) vector).getName());
writer.write(":");
} else {
writer.write(String.valueOf(i++));
writer.write(":");
}
writer.write(String.valueOf(vector.size()));
writer.write('\n');
} else if (nameOnly) {
if (vector instanceof NamedVector) {
writer.write(((NamedVector) vector).getName());
writer.write('\n');
}
} else {
String fmtStr;
if (useCSV) {
fmtStr = VectorHelper.vectorToCSVString(vector, namesAsComments);
} else {
fmtStr = VectorHelper.vectorToJson(vector, dictionary, maxIndexesPerVector,
sortVectors);
}
writer.write(fmtStr);
writer.write('\n');
}
itemCount++;
}
}
writer.flush();
} finally {
if (shouldClose) {
Closeables.closeQuietly(writer);
}
}
return 0;
}
| public int run(String[] args) throws Exception {
/**
Option seqOpt = obuilder.withLongName("seqFile").withRequired(false).withArgument(
abuilder.withName("seqFile").withMinimum(1).withMaximum(1).create()).withDescription(
"The Sequence File containing the Vectors").withShortName("s").create();
Option dirOpt = obuilder.withLongName("seqDirectory").withRequired(false).withArgument(
abuilder.withName("seqDirectory").withMinimum(1).withMaximum(1).create())
.withDescription("The directory containing Sequence File of Vectors")
.withShortName("d").create();
*/
addInputOption();
addOutputOption();
addOption("useKey", "u", "If the Key is a vector than dump that instead", false);
addOption("printKey", "p", "Print out the key as well, delimited by tab (or the value if useKey is true", false);
addOption("dictionary", "d", "The dictionary file.", false);
addOption("dictionaryType", "dt", "The dictionary file type (text|seqfile)", false);
addOption("csv", "c", "Output the Vector as CSV. Otherwise it substitutes in the terms for vector cell entries",
false);
addOption("namesAsComments", "n", "If using CSV output, optionally add a comment line for each NamedVector "
+ "(if the vector is one) printing out the name", false);
addOption("nameOnly", "N", "Use the name as the value for each NamedVector (skip other vectors)", false);
addOption("sortVectors", "sort", "Sort output key/value pairs of the vector entries in abs magnitude "
+ "descending order", false);
addOption("quiet", "q", "Print only file contents", false);
addOption("sizeOnly", "sz", "Dump only the size of the vector", false);
addOption("numItems", "ni", "Output at most <n> vecors", false);
addOption("vectorSize", "vs", "Truncate vectors to <vs> length when dumping (most useful when in"
+ " conjunction with -sort", false);
addOption(buildOption("filter", "fi", "Only dump out those vectors whose name matches the filter."
+ " Multiple items may be specified by repeating the argument.", true, 1, Integer.MAX_VALUE, false, null));
if (parseArguments(args, false, true) == null) {
return -1;
}
Path[] pathArr;
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
Path input = getInputPath();
FileStatus fileStatus = fs.getFileStatus(input);
if (fileStatus.isDir()) {
pathArr = FileUtil.stat2Paths(fs.listStatus(input, new OutputFilesFilter()));
} else {
FileStatus[] inputPaths = fs.globStatus(input);
pathArr = new Path[inputPaths.length];
int i = 0;
for (FileStatus fstatus : inputPaths) {
pathArr[i++] = fstatus.getPath();
}
}
String dictionaryType = getOption("dictionaryType", "text");
boolean sortVectors = hasOption("sortVectors");
boolean quiet = hasOption("quiet");
if (!quiet) {
log.info("Sort? {}", sortVectors);
}
String[] dictionary = null;
if (hasOption("dictionary")) {
String dictFile = getOption("dictionary");
if ("text".equals(dictionaryType)) {
dictionary = VectorHelper.loadTermDictionary(new File(dictFile));
} else if ("sequencefile".equals(dictionaryType)) {
dictionary = VectorHelper.loadTermDictionary(conf, dictFile);
} else {
//TODO: support Lucene's FST as a dictionary type
throw new IOException("Invalid dictionary type: " + dictionaryType);
}
}
Set<String> filters;
if (hasOption("filter")) {
filters = Sets.newHashSet(getOptions("filter"));
} else {
filters = null;
}
boolean useCSV = hasOption("csv");
boolean sizeOnly = hasOption("sizeOnly");
boolean nameOnly = hasOption("nameOnly");
boolean namesAsComments = hasOption("namesAsComments");
boolean transposeKeyValue = hasOption("vectorAsKey");
Writer writer;
boolean shouldClose;
File output = getOutputFile();
if (output != null) {
shouldClose = true;
writer = Files.newWriter(output, Charsets.UTF_8);
} else {
shouldClose = false;
writer = new OutputStreamWriter(System.out, Charsets.UTF_8);
}
try {
boolean printKey = hasOption("printKey");
if (useCSV && dictionary != null) {
writer.write("#");
for (int j = 0; j < dictionary.length; j++) {
writer.write(dictionary[j]);
if (j < dictionary.length - 1) {
writer.write(',');
}
}
writer.write('\n');
}
Long numItems = null;
if (hasOption("numItems")) {
numItems = Long.parseLong(getOption("numItems"));
if (quiet) {
writer.append("#Max Items to dump: ").append(String.valueOf(numItems)).append('\n');
}
}
int maxIndexesPerVector = hasOption("vectorSize")
? Integer.parseInt(getOption("vectorSize"))
: Integer.MAX_VALUE;
long itemCount = 0;
int fileCount = 0;
for (Path path : pathArr) {
if (numItems != null && numItems <= itemCount) {
break;
}
if (quiet) {
log.info("Processing file '{}' ({}/{})", path, ++fileCount, pathArr.length);
}
SequenceFileIterable<Writable, Writable> iterable =
new SequenceFileIterable<Writable, Writable>(path, true, conf);
Iterator<Pair<Writable, Writable>> iterator = iterable.iterator();
long i = 0;
while (iterator.hasNext() && (numItems == null || itemCount < numItems)) {
Pair<Writable, Writable> record = iterator.next();
Writable keyWritable = record.getFirst();
Writable valueWritable = record.getSecond();
if (printKey) {
Writable notTheVectorWritable = transposeKeyValue ? valueWritable : keyWritable;
writer.write(notTheVectorWritable.toString());
writer.write('\t');
}
Vector vector;
try {
vector = ((VectorWritable)
(transposeKeyValue ? keyWritable : valueWritable)).get();
} catch (ClassCastException e) {
if ((transposeKeyValue ? keyWritable : valueWritable)
instanceof WeightedPropertyVectorWritable) {
vector =
((WeightedPropertyVectorWritable)
(transposeKeyValue ? keyWritable : valueWritable)).getVector();
} else {
throw e;
}
}
if (filters != null
&& vector instanceof NamedVector
&& !filters.contains(((NamedVector) vector).getName())) {
//we are filtering out this item, skip
continue;
}
if (sizeOnly) {
if (vector instanceof NamedVector) {
writer.write(((NamedVector) vector).getName());
writer.write(":");
} else {
writer.write(String.valueOf(i++));
writer.write(":");
}
writer.write(String.valueOf(vector.size()));
writer.write('\n');
} else if (nameOnly) {
if (vector instanceof NamedVector) {
writer.write(((NamedVector) vector).getName());
writer.write('\n');
}
} else {
String fmtStr;
if (useCSV) {
fmtStr = VectorHelper.vectorToCSVString(vector, namesAsComments);
} else {
fmtStr = VectorHelper.vectorToJson(vector, dictionary, maxIndexesPerVector,
sortVectors);
}
writer.write(fmtStr);
writer.write('\n');
}
itemCount++;
}
}
writer.flush();
} finally {
if (shouldClose) {
Closeables.close(writer, true);
}
}
return 0;
}
|
private void initParents(IndexReader reader, int first) throws IOException {
if (reader.maxDoc() == first) {
return;
}
// it's ok to use MultiFields because we only iterate on one posting list.
// breaking it to loop over the leaves() only complicates code for no
// apparent gain.
DocsAndPositionsEnum positions = MultiFields.getTermPositionsEnum(reader, null,
Consts.FIELD_PAYLOADS, Consts.PAYLOAD_PARENT_BYTES_REF,
DocsAndPositionsEnum.FLAG_PAYLOADS);
// shouldn't really happen, if it does, something's wrong
if (positions == null || positions.advance(first) == DocIdSetIterator.NO_MORE_DOCS) {
throw new CorruptIndexException("Missing parent data for category " + first);
}
int num = reader.maxDoc();
for (int i = first; i < num; i++) {
if (positions.docID() == i) {
if (positions.freq() == 0) { // shouldn't happen
throw new CorruptIndexException("Missing parent data for category " + i);
}
parents[i] = positions.nextPosition();
if (positions.nextDoc() == DocIdSetIterator.NO_MORE_DOCS) {
if (i + 1 < num) {
throw new CorruptIndexException("Missing parent data for category "+ (i + 1));
}
break;
}
} else { // this shouldn't happen
throw new CorruptIndexException("Missing parent data for category " + i);
}
}
}
/**
* Adds the given ordinal/parent info and returns either a new instance if the
* underlying array had to grow, or this instance otherwise.
* <p>
* <b>NOTE:</b> you should call this method from a thread-safe code.
*/
ParallelTaxonomyArrays add(int ordinal, int parentOrdinal) {
if (ordinal >= parents.length) {
int[] newarray = ArrayUtil.grow(parents);
newarray[ordinal] = parentOrdinal;
return new ParallelTaxonomyArrays(newarray);
}
parents[ordinal] = parentOrdinal;
return this;
}
| private void initParents(IndexReader reader, int first) throws IOException {
if (reader.maxDoc() == first) {
return;
}
// it's ok to use MultiFields because we only iterate on one posting list.
// breaking it to loop over the leaves() only complicates code for no
// apparent gain.
DocsAndPositionsEnum positions = MultiFields.getTermPositionsEnum(reader, null,
Consts.FIELD_PAYLOADS, Consts.PAYLOAD_PARENT_BYTES_REF,
DocsAndPositionsEnum.FLAG_PAYLOADS);
// shouldn't really happen, if it does, something's wrong
if (positions == null || positions.advance(first) == DocIdSetIterator.NO_MORE_DOCS) {
throw new CorruptIndexException("Missing parent data for category " + first);
}
int num = reader.maxDoc();
for (int i = first; i < num; i++) {
if (positions.docID() == i) {
if (positions.freq() == 0) { // shouldn't happen
throw new CorruptIndexException("Missing parent data for category " + i);
}
parents[i] = positions.nextPosition();
if (positions.nextDoc() == DocIdSetIterator.NO_MORE_DOCS) {
if (i + 1 < num) {
throw new CorruptIndexException("Missing parent data for category "+ (i + 1));
}
break;
}
} else { // this shouldn't happen
throw new CorruptIndexException("Missing parent data for category " + i);
}
}
}
/**
* Adds the given ordinal/parent info and returns either a new instance if the
* underlying array had to grow, or this instance otherwise.
* <p>
* <b>NOTE:</b> you should call this method from a thread-safe code.
*/
ParallelTaxonomyArrays add(int ordinal, int parentOrdinal) {
if (ordinal >= parents.length) {
int[] newarray = ArrayUtil.grow(parents, ordinal + 1);
newarray[ordinal] = parentOrdinal;
return new ParallelTaxonomyArrays(newarray);
}
parents[ordinal] = parentOrdinal;
return this;
}
|
private int getConnFromDatabaseName() throws DRDAProtocolException
{
Properties p = new Properties();
databaseAccessException = null;
//if we haven't got the correlation token yet, use session number for drdaID
if (session.drdaID == null)
session.drdaID = leftBrace + session.connNum + rightBrace;
p.put(Attribute.DRDAID_ATTR, session.drdaID);
try {
database.makeConnection(p);
} catch (SQLException se) {
String sqlState = se.getSQLState();
// need to set the security check code based on the reason the connection
// was denied, Cloudscape doesn't say whether the userid or password caused
// the problem, so we will just return userid invalid
databaseAccessException = se;
for (; se != null; se = se.getNextException())
{
if (SanityManager.DEBUG)
trace(se.getMessage());
println2Log(database.dbName, session.drdaID, se.getMessage());
}
if (sqlState.regionMatches(0,SQLState.LOGIN_FAILED,0,5))
return CodePoint.SECCHKCD_USERIDINVALID;
return 0;
}
catch (Exception e)
{
// If cloudscape has shut down for some reason,
// we will send an agent error and then try to
// get the driver loaded again. We have to get
// rid of the client first in case they are holding
// the DriverManager lock.
println2Log(database.dbName, session.drdaID,
"Driver not loaded"
+ e.getMessage());
try {
agentError("Driver not loaded");
}
catch (DRDAProtocolException dpe)
{
// Retry starting the server before rethrowing
// the protocol exception. Then hopfully all
// will be well when they try again.
try {
server.startNetworkServer();
} catch (Exception re) {
println2Log(database.dbName, session.drdaID, "Failed attempt to reload driver " +re.getMessage() );
}
throw dpe;
}
}
// Everything worked so log connection to the database.
if (getLogConnections())
println2Log(database.dbName, session.drdaID,
"Cloudscape Network Server connected to database " +
database.dbName);
return 0;
}
| private int getConnFromDatabaseName() throws DRDAProtocolException
{
Properties p = new Properties();
databaseAccessException = null;
//if we haven't got the correlation token yet, use session number for drdaID
if (session.drdaID == null)
session.drdaID = leftBrace + session.connNum + rightBrace;
p.put(Attribute.DRDAID_ATTR, session.drdaID);
try {
database.makeConnection(p);
} catch (SQLException se) {
String sqlState = se.getSQLState();
// need to set the security check code based on the reason the connection
// was denied, Cloudscape doesn't say whether the userid or password caused
// the problem, so we will just return userid invalid
databaseAccessException = se;
for (; se != null; se = se.getNextException())
{
if (SanityManager.DEBUG)
trace(se.getMessage());
println2Log(database.dbName, session.drdaID, se.getMessage());
}
if (sqlState.regionMatches(0,SQLState.LOGIN_FAILED,0,5))
return CodePoint.SECCHKCD_USERIDINVALID;
return 0;
}
catch (Exception e)
{
// If cloudscape has shut down for some reason,
// we will send an agent error and then try to
// get the driver loaded again. We have to get
// rid of the client first in case they are holding
// the DriverManager lock.
println2Log(database.dbName, session.drdaID,
"Driver not loaded"
+ e.getMessage());
try {
agentError("Driver not loaded");
}
catch (DRDAProtocolException dpe)
{
// Retry starting the server before rethrowing
// the protocol exception. Then hopfully all
// will be well when they try again.
try {
server.startNetworkServer();
} catch (Exception re) {
println2Log(database.dbName, session.drdaID, "Failed attempt to reload driver " +re.getMessage() );
}
throw dpe;
}
}
// Everything worked so log connection to the database.
if (getLogConnections())
println2Log(database.dbName, session.drdaID,
"Apache Derby Network Server connected to database " +
database.dbName);
return 0;
}
|
private void showFromZooKeeper(SolrQueryRequest req, SolrQueryResponse rsp,
CoreContainer coreContainer) throws KeeperException,
InterruptedException, UnsupportedEncodingException {
SolrZkClient zkClient = coreContainer.getZkController().getZkClient();
String adminFile = getAdminFileFromZooKeeper(req, rsp, zkClient);
if (adminFile == null) {
return;
}
// Show a directory listing
List<String> children = zkClient.getChildren(adminFile, null, true);
if (children.size() > 0) {
NamedList<SimpleOrderedMap<Object>> files = new SimpleOrderedMap<SimpleOrderedMap<Object>>();
for (String f : children) {
if (isHiddenFile(rsp, f)) {
continue;
}
SimpleOrderedMap<Object> fileInfo = new SimpleOrderedMap<Object>();
files.add(f, fileInfo);
List<String> fchildren = zkClient.getChildren(adminFile, null, true);
if (fchildren.size() > 0) {
fileInfo.add("directory", true);
} else {
// TODO? content type
fileInfo.add("size", f.length());
}
// TODO: ?
// fileInfo.add( "modified", new Date( f.lastModified() ) );
}
rsp.add("files", files);
} else {
// Include the file contents
// The file logic depends on RawResponseWriter, so force its use.
ModifiableSolrParams params = new ModifiableSolrParams(req.getParams());
params.set(CommonParams.WT, "raw");
req.setParams(params);
ContentStreamBase content = new ContentStreamBase.ByteArrayStream(zkClient.getData(adminFile, null, null, true), adminFile);
content.setContentType(req.getParams().get(USE_CONTENT_TYPE));
rsp.add(RawResponseWriter.CONTENT, content);
}
rsp.setHttpCaching(false);
}
| private void showFromZooKeeper(SolrQueryRequest req, SolrQueryResponse rsp,
CoreContainer coreContainer) throws KeeperException,
InterruptedException, UnsupportedEncodingException {
SolrZkClient zkClient = coreContainer.getZkController().getZkClient();
String adminFile = getAdminFileFromZooKeeper(req, rsp, zkClient);
if (adminFile == null) {
return;
}
// Show a directory listing
List<String> children = zkClient.getChildren(adminFile, null, true);
if (children.size() > 0) {
NamedList<SimpleOrderedMap<Object>> files = new SimpleOrderedMap<SimpleOrderedMap<Object>>();
for (String f : children) {
if (isHiddenFile(rsp, f)) {
continue;
}
SimpleOrderedMap<Object> fileInfo = new SimpleOrderedMap<Object>();
files.add(f, fileInfo);
List<String> fchildren = zkClient.getChildren(adminFile + "/" + f, null, true);
if (fchildren.size() > 0) {
fileInfo.add("directory", true);
} else {
// TODO? content type
fileInfo.add("size", f.length());
}
// TODO: ?
// fileInfo.add( "modified", new Date( f.lastModified() ) );
}
rsp.add("files", files);
} else {
// Include the file contents
// The file logic depends on RawResponseWriter, so force its use.
ModifiableSolrParams params = new ModifiableSolrParams(req.getParams());
params.set(CommonParams.WT, "raw");
req.setParams(params);
ContentStreamBase content = new ContentStreamBase.ByteArrayStream(zkClient.getData(adminFile, null, null, true), adminFile);
content.setContentType(req.getParams().get(USE_CONTENT_TYPE));
rsp.add(RawResponseWriter.CONTENT, content);
}
rsp.setHttpCaching(false);
}
|
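The ZooKeeper listing pair above fixes a path-join mistake: to tell whether a child entry is itself a directory, the code must list the child's own path (`adminFile + "/" + f`), not the parent path a second time; otherwise every entry is reported as `directory=true` whenever the parent has any children at all. A local-filesystem analogue of the same idea, using only the JDK:

```java
import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class ChildPathDemo {
    public static void main(String[] args) throws IOException {
        Path parent = Paths.get(".");
        try (DirectoryStream<Path> children = Files.newDirectoryStream(parent)) {
            for (Path child : children) {
                // Check the child's own path; testing Files.isDirectory(parent) here
                // would wrongly mark every entry as a directory.
                System.out.println(child.getFileName() + (Files.isDirectory(child) ? "/" : ""));
            }
        }
    }
}
```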
private void parseSQLDTA(DRDAStatement stmt) throws DRDAProtocolException,SQLException
{
try {
parseSQLDTA_work(stmt);
}
catch (SQLException se)
{
skipRemainder(false);
throw se;
}
}
| private void parseSQLDTA(DRDAStatement stmt) throws DRDAProtocolException,SQLException
{
try {
parseSQLDTA_work(stmt);
}
catch (SQLException se)
{
skipRemainder(true);
throw se;
}
}
|
public int compare(ColumnFamilyStore o1, ColumnFamilyStore o2)
{
long size1 = o1.getTotalMemtableLiveSize();
long size2 = o2.getTotalMemtableLiveSize();
if (size1 < size2)
return -1;
if (size1 > size2)
return 1;
return 0;
}
});
// flush largest first until we get below our threshold.
// although it looks like liveBytes + flushingBytes will stay a constant, it will not if flushes finish
// while we loop, which is especially likely to happen if the flush queue fills up (so further forceFlush calls block)
while (true)
{
flushingBytes = countFlushingBytes();
if (liveBytes + flushingBytes <= DatabaseDescriptor.getTotalMemtableSpaceInMB() * 1048576L || sorted.isEmpty())
break;
ColumnFamilyStore cfs = sorted.remove(sorted.size() - 1);
long size = cfs.getTotalMemtableLiveSize();
logger.info("flushing {} to free up {} bytes", cfs, size);
liveBytes -= size;
cfs.forceFlush();
}
}
finally
{
logger.debug("memtable memory usage is {} bytes with {} live", liveBytes + flushingBytes, liveBytes);
}
}
| public int compare(ColumnFamilyStore o1, ColumnFamilyStore o2)
{
long size1 = o1.getTotalMemtableLiveSize();
long size2 = o2.getTotalMemtableLiveSize();
if (size1 < size2)
return -1;
if (size1 > size2)
return 1;
return 0;
}
});
// flush largest first until we get below our threshold.
// although it looks like liveBytes + flushingBytes will stay a constant, it will not if flushes finish
// while we loop, which is especially likely to happen if the flush queue fills up (so further forceFlush calls block)
while (true)
{
flushingBytes = countFlushingBytes();
if (liveBytes + flushingBytes <= DatabaseDescriptor.getTotalMemtableSpaceInMB() * 1048576L || sorted.isEmpty())
break;
ColumnFamilyStore cfs = sorted.remove(sorted.size() - 1);
long size = cfs.getTotalMemtableLiveSize();
logger.info("flushing {} to free up {} bytes", cfs, size);
liveBytes -= size;
cfs.forceFlush();
}
}
finally
{
logger.trace("memtable memory usage is {} bytes with {} live", liveBytes + flushingBytes, liveBytes);
}
}
|
public void addSSTable(SSTableReader sstable)
{
ssTables_.add(sstable);
CompactionManager.instance.submitMinor(this);
}
| public void addSSTable(SSTableReader sstable)
{
ssTables_.add(sstable);
CompactionManager.instance.submitMinorIfNeeded(this);
}
|
public void testCompactions() throws IOException, ExecutionException, InterruptedException
{
CompactionManager.instance.disableAutoCompaction();
// this test does enough rows to force multiple block indexes to be used
Table table = Table.open(TABLE1);
ColumnFamilyStore store = table.getColumnFamilyStore("Standard1");
final int ROWS_PER_SSTABLE = 10;
Set<String> inserted = new HashSet<String>();
for (int j = 0; j < (SSTableReader.indexInterval() * 3) / ROWS_PER_SSTABLE; j++) {
for (int i = 0; i < ROWS_PER_SSTABLE; i++) {
String key = String.valueOf(i % 2);
RowMutation rm = new RowMutation(TABLE1, key);
rm.add(new QueryPath("Standard1", null, String.valueOf(i / 2).getBytes()), new byte[0], j * ROWS_PER_SSTABLE + i);
rm.apply();
inserted.add(key);
}
store.forceBlockingFlush();
assertEquals(inserted.size(), table.getColumnFamilyStore("Standard1").getKeyRange("", "", 10000).keys.size());
}
while (true)
{
Future<Integer> ft = CompactionManager.instance.submitMinor(store);
if (ft.get() == 0)
break;
}
if (store.getSSTables().size() > 1)
{
CompactionManager.instance.submitMajor(store).get();
}
assertEquals(inserted.size(), table.getColumnFamilyStore("Standard1").getKeyRange("", "", 10000).keys.size());
}
| public void testCompactions() throws IOException, ExecutionException, InterruptedException
{
CompactionManager.instance.disableAutoCompaction();
// this test does enough rows to force multiple block indexes to be used
Table table = Table.open(TABLE1);
ColumnFamilyStore store = table.getColumnFamilyStore("Standard1");
final int ROWS_PER_SSTABLE = 10;
Set<String> inserted = new HashSet<String>();
for (int j = 0; j < (SSTableReader.indexInterval() * 3) / ROWS_PER_SSTABLE; j++) {
for (int i = 0; i < ROWS_PER_SSTABLE; i++) {
String key = String.valueOf(i % 2);
RowMutation rm = new RowMutation(TABLE1, key);
rm.add(new QueryPath("Standard1", null, String.valueOf(i / 2).getBytes()), new byte[0], j * ROWS_PER_SSTABLE + i);
rm.apply();
inserted.add(key);
}
store.forceBlockingFlush();
assertEquals(inserted.size(), table.getColumnFamilyStore("Standard1").getKeyRange("", "", 10000).keys.size());
}
while (true)
{
Future<Integer> ft = CompactionManager.instance.submitMinorIfNeeded(store);
if (ft.get() == 0)
break;
}
if (store.getSSTables().size() > 1)
{
CompactionManager.instance.submitMajor(store).get();
}
assertEquals(inserted.size(), table.getColumnFamilyStore("Standard1").getKeyRange("", "", 10000).keys.size());
}
|
private final SimpleDocValuesFormat defaultDVFormat = SimpleDocValuesFormat.forName("Memory");
// nocommit need simpleNormsFormat
} | private final SimpleDocValuesFormat defaultDVFormat = SimpleDocValuesFormat.forName("Lucene41");
// nocommit need simpleNormsFormat
} |
public List<String> getIncomingFiles(String host) throws IOException
{
List<String> files = new ArrayList<String>();
for (PendingFile pf : StreamInManager.getIncomingFiles(InetAddress.getByName(host)))
{
files.add(String.format("%s: %s", pf.getDescriptor().ksname, pf.toString()));
}
return files;
}
| public List<String> getIncomingFiles(String host) throws IOException
{
List<String> files = new ArrayList<String>();
for (PendingFile pf : StreamInManager.getIncomingFiles(InetAddress.getByName(host)))
{
files.add(String.format("%s: %s", pf.desc.ksname, pf.toString()));
}
return files;
}
|
public LinkedHashMap<PendingFile, PendingFile> getContextMapping(PendingFile[] remoteFiles) throws IOException
{
/* Create a local sstable for each remote sstable */
LinkedHashMap<PendingFile, PendingFile> mapping = new LinkedHashMap<PendingFile, PendingFile>();
for (PendingFile remote : remoteFiles)
{
Descriptor remotedesc = remote.getDescriptor();
// new local sstable
Table table = Table.open(remotedesc.ksname);
ColumnFamilyStore cfStore = table.getColumnFamilyStore(remotedesc.cfname);
Descriptor localdesc = Descriptor.fromFilename(cfStore.getFlushPath());
// add a local file for this component
mapping.put(remote, new PendingFile(localdesc, remote));
}
return mapping;
}
| public LinkedHashMap<PendingFile, PendingFile> getContextMapping(PendingFile[] remoteFiles) throws IOException
{
/* Create a local sstable for each remote sstable */
LinkedHashMap<PendingFile, PendingFile> mapping = new LinkedHashMap<PendingFile, PendingFile>();
for (PendingFile remote : remoteFiles)
{
Descriptor remotedesc = remote.desc;
// new local sstable
Table table = Table.open(remotedesc.ksname);
ColumnFamilyStore cfStore = table.getColumnFamilyStore(remotedesc.cfname);
Descriptor localdesc = Descriptor.fromFilename(cfStore.getFlushPath());
// add a local file for this component
mapping.put(remote, new PendingFile(localdesc, remote));
}
return mapping;
}
|
public void geohashRecursiveRandom() throws IOException {
init(12);
//1. Iterate test with the cluster at some worldly point of interest
Point[] clusterCenters = new Point[]{ctx.makePoint(-180,0), ctx.makePoint(0,90), ctx.makePoint(0,-90)};
for (Point clusterCenter : clusterCenters) {
//2. Iterate on size of cluster (a really small one and a large one)
String hashCenter = GeohashUtils.encodeLatLon(clusterCenter.getY(), clusterCenter.getX(), maxLength);
//calculate the number of degrees in the smallest grid box size (use for both lat & lon)
String smallBox = hashCenter.substring(0,hashCenter.length()-1);//chop off leaf precision
Rectangle clusterDims = GeohashUtils.decodeBoundary(smallBox,ctx);
double smallRadius = Math.max(clusterDims.getMaxX()-clusterDims.getMinX(),clusterDims.getMaxY()-clusterDims.getMinY());
assert smallRadius < 1;
double largeRadius = 20d;//good large size; don't use >=45 for this test code to work
double[] radiusDegs = {largeRadius,smallRadius};
for (double radiusDeg : radiusDegs) {
//3. Index random points in this cluster circle
deleteAll();
List<Point> points = new ArrayList<Point>();
for(int i = 0; i < 20; i++) {
//Note that this will not result in randomly distributed points in the
// circle, they will be concentrated towards the center a little. But
// it's good enough.
Point pt = ctx.getDistCalc().pointOnBearing(clusterCenter,
random().nextDouble() * radiusDeg, random().nextInt() * 360, ctx, null);
pt = alignGeohash(pt);
points.add(pt);
addDocument(newDoc("" + i, pt));
}
commit();
//3. Use some query centers. Each is twice the cluster's radius away.
for(int ri = 0; ri < 4; ri++) {
Point queryCenter = ctx.getDistCalc().pointOnBearing(clusterCenter,
radiusDeg*2, random().nextInt(360), ctx, null);
queryCenter = alignGeohash(queryCenter);
//4.1 Query a small box getting nothing
checkHits(q(queryCenter, radiusDeg - smallRadius/2), 0, null);
//4.2 Query a large box enclosing the cluster, getting everything
checkHits(q(queryCenter, radiusDeg*3*1.01), points.size(), null);
//4.3 Query a medium box getting some (calculate the correct solution and verify)
double queryDist = radiusDeg * 2;
//Find matching points. Put into int[] of doc ids which is the same thing as the index into points list.
int[] ids = new int[points.size()];
int ids_sz = 0;
for (int i = 0; i < points.size(); i++) {
Point point = points.get(i);
if (ctx.getDistCalc().distance(queryCenter, point) <= queryDist)
ids[ids_sz++] = i;
}
ids = Arrays.copyOf(ids, ids_sz);
//assert ids_sz > 0 (can't because randomness keeps us from being able to)
checkHits(q(queryCenter, queryDist), ids.length, ids);
}
}//for radiusDeg
}//for clusterCenter
}//randomTest()
| public void geohashRecursiveRandom() throws IOException {
init(12);
//1. Iterate test with the cluster at some worldly point of interest
Point[] clusterCenters = new Point[]{ctx.makePoint(-180,0), ctx.makePoint(0,90), ctx.makePoint(0,-90)};
for (Point clusterCenter : clusterCenters) {
//2. Iterate on size of cluster (a really small one and a large one)
String hashCenter = GeohashUtils.encodeLatLon(clusterCenter.getY(), clusterCenter.getX(), maxLength);
//calculate the number of degrees in the smallest grid box size (use for both lat & lon)
String smallBox = hashCenter.substring(0,hashCenter.length()-1);//chop off leaf precision
Rectangle clusterDims = GeohashUtils.decodeBoundary(smallBox,ctx);
double smallRadius = Math.max(clusterDims.getMaxX()-clusterDims.getMinX(),clusterDims.getMaxY()-clusterDims.getMinY());
assert smallRadius < 1;
double largeRadius = 20d;//good large size; don't use >=45 for this test code to work
double[] radiusDegs = {largeRadius,smallRadius};
for (double radiusDeg : radiusDegs) {
//3. Index random points in this cluster circle
deleteAll();
List<Point> points = new ArrayList<Point>();
for(int i = 0; i < 20; i++) {
//Note that this will not result in randomly distributed points in the
// circle, they will be concentrated towards the center a little. But
// it's good enough.
Point pt = ctx.getDistCalc().pointOnBearing(clusterCenter,
random().nextDouble() * radiusDeg, random().nextInt() * 360, ctx, null);
pt = alignGeohash(pt);
points.add(pt);
addDocument(newDoc("" + i, pt));
}
commit();
//3. Use some query centers. Each is twice the cluster's radius away.
for(int ri = 0; ri < 4; ri++) {
Point queryCenter = ctx.getDistCalc().pointOnBearing(clusterCenter,
radiusDeg*2, random().nextInt(360), ctx, null);
queryCenter = alignGeohash(queryCenter);
//4.1 Query a small box getting nothing
checkHits(q(queryCenter, radiusDeg - smallRadius/2), 0, null);
//4.2 Query a large box enclosing the cluster, getting everything
checkHits(q(queryCenter, radiusDeg*3 + smallRadius/2), points.size(), null);
//4.3 Query a medium box getting some (calculate the correct solution and verify)
double queryDist = radiusDeg * 2;
//Find matching points. Put into int[] of doc ids which is the same thing as the index into points list.
int[] ids = new int[points.size()];
int ids_sz = 0;
for (int i = 0; i < points.size(); i++) {
Point point = points.get(i);
if (ctx.getDistCalc().distance(queryCenter, point) <= queryDist)
ids[ids_sz++] = i;
}
ids = Arrays.copyOf(ids, ids_sz);
//assert ids_sz > 0 (can't because randomness keeps us from being able to)
checkHits(q(queryCenter, queryDist), ids.length, ids);
}
}//for radiusDeg
}//for clusterCenter
}//randomTest()
|
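The only change in the geohash test above is the slack added to the "large box" query: a 1% relative margin (`radiusDeg*3*1.01`) can be smaller than the error introduced by snapping points and query centers to geohash cells when `radiusDeg` is the tiny `smallRadius`, whereas `radiusDeg*3 + smallRadius/2` adds an absolute half-cell margin that always covers it. A rough illustration with hypothetical magnitudes (the concrete numbers are assumptions, not taken from the test):

```java
public class MarginDemo {
    public static void main(String[] args) {
        double smallRadius = 1e-5;            // assumed size of the smallest geohash cell, in degrees
        double radiusDeg = smallRadius;       // the "really small cluster" case from the test
        double oldSlack = radiusDeg * 3 * 0.01; // 1% relative margin: 3e-7 degrees
        double newSlack = smallRadius / 2;      // half-cell absolute margin: 5e-6 degrees
        System.out.println(oldSlack < newSlack); // true: the old margin can miss cell-aligned points
    }
}
```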
public CoreContainer initialize() throws IOException, ParserConfigurationException, SAXException {
CoreContainer cores = null;
String instanceDir = SolrResourceLoader.locateInstanceDir();
File fconf = new File(instanceDir, solrConfigFilename == null? "solr.xml": solrConfigFilename);
log.info("looking for solr.xml: " + fconf.getAbsolutePath());
if (fconf.exists()) {
cores = new CoreContainer();
cores.load(instanceDir, fconf);
abortOnConfigurationError = false;
// if any core aborts on startup, then abort
for (SolrCore c : cores.getCores()) {
if (c.getSolrConfig().getBool("abortOnConfigurationError", false)) {
abortOnConfigurationError = true;
break;
}
}
solrConfigFilename = cores.getConfigFile().getName();
} else {
// perform compatibility init
cores = new CoreContainer(new SolrResourceLoader(instanceDir));
SolrConfig cfg = solrConfigFilename == null ? new SolrConfig() : new SolrConfig(solrConfigFilename);
CoreDescriptor dcore = new CoreDescriptor(cores, "", cfg.getResourceLoader().getInstanceDir());
SolrCore singlecore = new SolrCore(null, null, cfg, null, dcore);
abortOnConfigurationError = cfg.getBool(
"abortOnConfigurationError", abortOnConfigurationError);
cores.register("", singlecore, false);
cores.setPersistent(false);
solrConfigFilename = cfg.getName();
}
return cores;
}
}
| public CoreContainer initialize() throws IOException, ParserConfigurationException, SAXException {
CoreContainer cores = null;
String instanceDir = SolrResourceLoader.locateInstanceDir();
File fconf = new File(instanceDir, solrConfigFilename == null? "solr.xml": solrConfigFilename);
log.info("looking for solr.xml: " + fconf.getAbsolutePath());
if (fconf.exists()) {
cores = new CoreContainer();
cores.load(instanceDir, fconf);
abortOnConfigurationError = false;
// if any core aborts on startup, then abort
for (SolrCore c : cores.getCores()) {
if (c.getSolrConfig().getBool("abortOnConfigurationError", false)) {
abortOnConfigurationError = true;
break;
}
}
solrConfigFilename = cores.getConfigFile().getName();
} else {
// perform compatibility init
cores = new CoreContainer(new SolrResourceLoader(instanceDir));
SolrConfig cfg = solrConfigFilename == null ? new SolrConfig() : new SolrConfig(solrConfigFilename);
CoreDescriptor dcore = new CoreDescriptor(cores, "", ".");
SolrCore singlecore = new SolrCore(null, null, cfg, null, dcore);
abortOnConfigurationError = cfg.getBool(
"abortOnConfigurationError", abortOnConfigurationError);
cores.register("", singlecore, false);
cores.setPersistent(false);
solrConfigFilename = cfg.getName();
}
return cores;
}
}
|
public static HashFunction[] createHashFunctions(HashType type, int numFunctions) {
HashFunction[] hashFunction = new HashFunction[numFunctions];
Random seed = new Random(11);
switch (type) {
case LINEAR:
for (int i = 0; i < numFunctions; i++) {
hashFunction[i] = new LinearHash(seed.nextInt(), seed.nextInt());
}
break;
case POLYNOMIAL:
for (int i = 0; i < numFunctions; i++) {
hashFunction[i] = new PolynomialHash(seed.nextInt(), seed.nextInt(), seed.nextInt());
}
break;
case MURMUR:
for (int i = 0; i < numFunctions; i++) {
hashFunction[i] = new MurmurHashWrapper(seed.nextInt());
}
break;
}
return hashFunction;
}
| public static HashFunction[] createHashFunctions(HashType type, int numFunctions) {
HashFunction[] hashFunction = new HashFunction[numFunctions];
Random seed = RandomUtils.getRandom(11);
switch (type) {
case LINEAR:
for (int i = 0; i < numFunctions; i++) {
hashFunction[i] = new LinearHash(seed.nextInt(), seed.nextInt());
}
break;
case POLYNOMIAL:
for (int i = 0; i < numFunctions; i++) {
hashFunction[i] = new PolynomialHash(seed.nextInt(), seed.nextInt(), seed.nextInt());
}
break;
case MURMUR:
for (int i = 0; i < numFunctions; i++) {
hashFunction[i] = new MurmurHashWrapper(seed.nextInt());
}
break;
}
return hashFunction;
}
|
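Editorial note on the pair above: the fix swaps java.util.Random for Mahout's RandomUtils.getRandom(11), presumably so that random number generation goes through Mahout's shared test-controllable helper. As a minimal, JDK-only sketch of why a fixed seed matters for reproducible hash functions (class name and values are hypothetical):

import java.util.Random;

// Two generators built from the same seed emit identical sequences, so any
// hash functions derived from them are reproducible across test runs.
public class SeededRandomSketch {
    public static void main(String[] args) {
        Random a = new Random(11);
        Random b = new Random(11);
        for (int i = 0; i < 5; i++) {
            int x = a.nextInt();
            int y = b.nextInt();
            if (x != y) {
                throw new AssertionError("same seed must yield the same sequence");
            }
            System.out.println(x);
        }
    }
}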
public List<TokenRange> describe_ring(String keyspace)throws InvalidRequestException
{
if (!DatabaseDescriptor.getNonSystemTables().contains(keyspace))
throw new InvalidRequestException("There is no ring for the keyspace: " + keyspace);
List<TokenRange> ranges = new ArrayList<TokenRange>();
Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
for (Map.Entry<Range, List<String>> entry : StorageService.instance.getRangeToEndpointMap(keyspace).entrySet())
{
Range range = entry.getKey();
List<String> endpoints = entry.getValue();
ranges.add(new TokenRange(tf.toString(range.left), tf.toString(range.right), endpoints));
}
return ranges;
}
| public List<TokenRange> describe_ring(String keyspace)throws InvalidRequestException
{
if (keyspace == null || !DatabaseDescriptor.getNonSystemTables().contains(keyspace))
throw new InvalidRequestException("There is no ring for the keyspace: " + keyspace);
List<TokenRange> ranges = new ArrayList<TokenRange>();
Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
for (Map.Entry<Range, List<String>> entry : StorageService.instance.getRangeToEndpointMap(keyspace).entrySet())
{
Range range = entry.getKey();
List<String> endpoints = entry.getValue();
ranges.add(new TokenRange(tf.toString(range.left), tf.toString(range.right), endpoints));
}
return ranges;
}
|
public void testSortedBytes() throws IOException {
DocValuesType type = DocValuesType.SORTED;
final Directory d = newDirectory();
IndexWriterConfig cfg = newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer(random()));
IndexWriter w = new IndexWriter(d, cfg);
int numDocs = atLeast(100);
BytesRefHash hash = new BytesRefHash();
Map<String, String> docToString = new HashMap<String, String>();
int len = 1 + random().nextInt(50);
for (int i = 0; i < numDocs; i++) {
Document doc = new Document();
doc.add(newTextField("id", "" + i, Field.Store.YES));
String string = _TestUtil.randomRealisticUnicodeString(random(), 1, len);
BytesRef br = new BytesRef(string);
doc.add(new SortedBytesDocValuesField("field", br));
hash.add(br);
docToString.put("" + i, string);
w.addDocument(doc);
}
if (rarely()) {
w.commit();
}
int numDocsNoValue = atLeast(10);
for (int i = 0; i < numDocsNoValue; i++) {
Document doc = new Document();
doc.add(newTextField("id", "noValue", Field.Store.YES));
w.addDocument(doc);
}
BytesRef bytesRef = new BytesRef();
hash.add(bytesRef); // add empty value for the gaps
if (rarely()) {
w.commit();
}
for (int i = 0; i < numDocs; i++) {
Document doc = new Document();
String id = "" + i + numDocs;
doc.add(newTextField("id", id, Field.Store.YES));
String string = _TestUtil.randomRealisticUnicodeString(random(), 1, len);
BytesRef br = new BytesRef(string);
hash.add(br);
docToString.put(id, string);
doc.add(new SortedBytesDocValuesField("field", br));
w.addDocument(doc);
}
w.commit();
IndexReader reader = w.getReader();
SortedDocValues docValues = MultiSimpleDocValues.simpleSortedValues(reader, "field");
int[] sort = hash.sort(BytesRef.getUTF8SortedAsUnicodeComparator());
BytesRef expected = new BytesRef();
BytesRef actual = new BytesRef();
assertEquals(hash.size(), docValues.getValueCount());
for (int i = 0; i < hash.size(); i++) {
hash.get(sort[i], expected);
docValues.lookupOrd(i, actual);
assertEquals(expected.utf8ToString(), actual.utf8ToString());
int ord = docValues.lookupTerm(expected, actual);
assertEquals(i, ord);
}
AtomicReader slowR = SlowCompositeReaderWrapper.wrap(reader);
Set<Entry<String, String>> entrySet = docToString.entrySet();
for (Entry<String, String> entry : entrySet) {
int docId = docId(slowR, new Term("id", entry.getKey()));
expected = new BytesRef(entry.getValue());
docValues.get(docId, actual);
assertEquals(expected, actual);
}
reader.close();
w.close();
d.close();
}
| public void testSortedBytes() throws IOException {
DocValuesType type = DocValuesType.SORTED;
final Directory d = newDirectory();
IndexWriterConfig cfg = newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer(random()));
IndexWriter w = new IndexWriter(d, cfg);
int numDocs = atLeast(100);
BytesRefHash hash = new BytesRefHash();
Map<String, String> docToString = new HashMap<String, String>();
int len = 1 + random().nextInt(50);
for (int i = 0; i < numDocs; i++) {
Document doc = new Document();
doc.add(newTextField("id", "" + i, Field.Store.YES));
String string = _TestUtil.randomRealisticUnicodeString(random(), 1, len);
BytesRef br = new BytesRef(string);
doc.add(new SortedBytesDocValuesField("field", br));
hash.add(br);
docToString.put("" + i, string);
w.addDocument(doc);
}
if (rarely()) {
w.commit();
}
int numDocsNoValue = atLeast(10);
for (int i = 0; i < numDocsNoValue; i++) {
Document doc = new Document();
doc.add(newTextField("id", "noValue", Field.Store.YES));
w.addDocument(doc);
}
BytesRef bytesRef = new BytesRef();
hash.add(bytesRef); // add empty value for the gaps
if (rarely()) {
w.commit();
}
for (int i = 0; i < numDocs; i++) {
Document doc = new Document();
String id = "" + i + numDocs;
doc.add(newTextField("id", id, Field.Store.YES));
String string = _TestUtil.randomRealisticUnicodeString(random(), 1, len);
BytesRef br = new BytesRef(string);
hash.add(br);
docToString.put(id, string);
doc.add(new SortedBytesDocValuesField("field", br));
w.addDocument(doc);
}
w.commit();
IndexReader reader = w.getReader();
SortedDocValues docValues = MultiDocValues.getSortedValues(reader, "field");
int[] sort = hash.sort(BytesRef.getUTF8SortedAsUnicodeComparator());
BytesRef expected = new BytesRef();
BytesRef actual = new BytesRef();
assertEquals(hash.size(), docValues.getValueCount());
for (int i = 0; i < hash.size(); i++) {
hash.get(sort[i], expected);
docValues.lookupOrd(i, actual);
assertEquals(expected.utf8ToString(), actual.utf8ToString());
int ord = docValues.lookupTerm(expected, actual);
assertEquals(i, ord);
}
AtomicReader slowR = SlowCompositeReaderWrapper.wrap(reader);
Set<Entry<String, String>> entrySet = docToString.entrySet();
for (Entry<String, String> entry : entrySet) {
int docId = docId(slowR, new Term("id", entry.getKey()));
expected = new BytesRef(entry.getValue());
docValues.get(docId, actual);
assertEquals(expected, actual);
}
reader.close();
w.close();
d.close();
}
|
public void testAddDocument() throws Exception {
Document testDoc = new Document();
DocHelper.setupDoc(testDoc);
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
writer.addDocument(testDoc);
writer.commit();
SegmentInfoPerCommit info = writer.newestSegment();
writer.close();
//After adding the document, we should be able to read it back in
SegmentReader reader = new SegmentReader(info, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random()));
assertTrue(reader != null);
StoredDocument doc = reader.document(0);
assertTrue(doc != null);
//System.out.println("Document: " + doc);
StorableField[] fields = doc.getFields("textField2");
assertTrue(fields != null && fields.length == 1);
assertTrue(fields[0].stringValue().equals(DocHelper.FIELD_2_TEXT));
assertTrue(fields[0].fieldType().storeTermVectors());
fields = doc.getFields("textField1");
assertTrue(fields != null && fields.length == 1);
assertTrue(fields[0].stringValue().equals(DocHelper.FIELD_1_TEXT));
assertFalse(fields[0].fieldType().storeTermVectors());
fields = doc.getFields("keyField");
assertTrue(fields != null && fields.length == 1);
assertTrue(fields[0].stringValue().equals(DocHelper.KEYWORD_TEXT));
fields = doc.getFields(DocHelper.NO_NORMS_KEY);
assertTrue(fields != null && fields.length == 1);
assertTrue(fields[0].stringValue().equals(DocHelper.NO_NORMS_TEXT));
fields = doc.getFields(DocHelper.TEXT_FIELD_3_KEY);
assertTrue(fields != null && fields.length == 1);
assertTrue(fields[0].stringValue().equals(DocHelper.FIELD_3_TEXT));
// test that the norms are not present in the segment if
// omitNorms is true
for (FieldInfo fi : reader.getFieldInfos()) {
if (fi.isIndexed()) {
assertTrue(fi.omitsNorms() == (reader.simpleNormValues(fi.name) == null));
}
}
reader.close();
}
| public void testAddDocument() throws Exception {
Document testDoc = new Document();
DocHelper.setupDoc(testDoc);
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
writer.addDocument(testDoc);
writer.commit();
SegmentInfoPerCommit info = writer.newestSegment();
writer.close();
//After adding the document, we should be able to read it back in
SegmentReader reader = new SegmentReader(info, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random()));
assertTrue(reader != null);
StoredDocument doc = reader.document(0);
assertTrue(doc != null);
//System.out.println("Document: " + doc);
StorableField[] fields = doc.getFields("textField2");
assertTrue(fields != null && fields.length == 1);
assertTrue(fields[0].stringValue().equals(DocHelper.FIELD_2_TEXT));
assertTrue(fields[0].fieldType().storeTermVectors());
fields = doc.getFields("textField1");
assertTrue(fields != null && fields.length == 1);
assertTrue(fields[0].stringValue().equals(DocHelper.FIELD_1_TEXT));
assertFalse(fields[0].fieldType().storeTermVectors());
fields = doc.getFields("keyField");
assertTrue(fields != null && fields.length == 1);
assertTrue(fields[0].stringValue().equals(DocHelper.KEYWORD_TEXT));
fields = doc.getFields(DocHelper.NO_NORMS_KEY);
assertTrue(fields != null && fields.length == 1);
assertTrue(fields[0].stringValue().equals(DocHelper.NO_NORMS_TEXT));
fields = doc.getFields(DocHelper.TEXT_FIELD_3_KEY);
assertTrue(fields != null && fields.length == 1);
assertTrue(fields[0].stringValue().equals(DocHelper.FIELD_3_TEXT));
// test that the norms are not present in the segment if
// omitNorms is true
for (FieldInfo fi : reader.getFieldInfos()) {
if (fi.isIndexed()) {
assertTrue(fi.omitsNorms() == (reader.getNormValues(fi.name) == null));
}
}
reader.close();
}
|
public void testFloatNorms() throws IOException {
Directory dir = newDirectory();
IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer(random()));
Similarity provider = new MySimProvider();
config.setSimilarity(provider);
RandomIndexWriter writer = new RandomIndexWriter(random(), dir, config);
final LineFileDocs docs = new LineFileDocs(random());
int num = atLeast(100);
for (int i = 0; i < num; i++) {
Document doc = docs.nextDoc();
float nextFloat = random().nextFloat();
Field f = new TextField(floatTestField, "" + nextFloat, Field.Store.YES);
f.setBoost(nextFloat);
doc.add(f);
writer.addDocument(doc);
doc.removeField(floatTestField);
if (rarely()) {
writer.commit();
}
}
writer.commit();
writer.close();
AtomicReader open = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir));
NumericDocValues norms = open.simpleNormValues(floatTestField);
assertNotNull(norms);
for (int i = 0; i < open.maxDoc(); i++) {
StoredDocument document = open.document(i);
float expected = Float.parseFloat(document.get(floatTestField));
assertEquals(expected, Float.intBitsToFloat((int)norms.get(i)), 0.0f);
}
open.close();
dir.close();
docs.close();
}
| public void testFloatNorms() throws IOException {
Directory dir = newDirectory();
IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer(random()));
Similarity provider = new MySimProvider();
config.setSimilarity(provider);
RandomIndexWriter writer = new RandomIndexWriter(random(), dir, config);
final LineFileDocs docs = new LineFileDocs(random());
int num = atLeast(100);
for (int i = 0; i < num; i++) {
Document doc = docs.nextDoc();
float nextFloat = random().nextFloat();
Field f = new TextField(floatTestField, "" + nextFloat, Field.Store.YES);
f.setBoost(nextFloat);
doc.add(f);
writer.addDocument(doc);
doc.removeField(floatTestField);
if (rarely()) {
writer.commit();
}
}
writer.commit();
writer.close();
AtomicReader open = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir));
NumericDocValues norms = open.getNormValues(floatTestField);
assertNotNull(norms);
for (int i = 0; i < open.maxDoc(); i++) {
StoredDocument document = open.document(i);
float expected = Float.parseFloat(document.get(floatTestField));
assertEquals(expected, Float.intBitsToFloat((int)norms.get(i)), 0.0f);
}
open.close();
dir.close();
docs.close();
}
|
public void test() throws Exception {
NumericDocValues fooNorms = MultiSimpleDocValues.simpleNormValues(reader, "foo");
assertNotNull(fooNorms);
for (int i = 0; i < reader.maxDoc(); i++) {
assertEquals(expected.get(i).longValue(), fooNorms.get(i));
}
}
| public void test() throws Exception {
NumericDocValues fooNorms = MultiDocValues.getNormValues(reader, "foo");
assertNotNull(fooNorms);
for (int i = 0; i < reader.maxDoc(); i++) {
assertEquals(expected.get(i).longValue(), fooNorms.get(i));
}
}
|
public void test() throws Exception {
NumericDocValues fooNorms = MultiSimpleDocValues.simpleNormValues(reader, "foo");
for (int i = 0; i < reader.maxDoc(); i++) {
assertEquals(expected.get(i).intValue(), fooNorms.get(i) & 0xff);
}
}
| public void test() throws Exception {
NumericDocValues fooNorms = MultiDocValues.getNormValues(reader, "foo");
for (int i = 0; i < reader.maxDoc(); i++) {
assertEquals(expected.get(i).intValue(), fooNorms.get(i) & 0xff);
}
}
|
public void test1PhraseShortMV() throws Exception {
makeIndexShortMV();
FieldQuery fq = new FieldQuery( tq( "d" ), true, true );
FieldTermStack stack = new FieldTermStack( reader, 0, F, fq );
FieldPhraseList fpl = new FieldPhraseList( stack, fq );
SimpleFragListBuilder sflb = new SimpleFragListBuilder();
FieldFragList ffl = sflb.createFieldFragList( fpl, 100 );
assertEquals( 1, ffl.fragInfos.size() );
assertEquals( "subInfos=(d((6,7)))/1.0(0,100)", ffl.fragInfos.get( 0 ).toString() );
}
| public void test1PhraseShortMV() throws Exception {
makeIndexShortMV();
FieldQuery fq = new FieldQuery( tq( "d" ), true, true );
FieldTermStack stack = new FieldTermStack( reader, 0, F, fq );
FieldPhraseList fpl = new FieldPhraseList( stack, fq );
SimpleFragListBuilder sflb = new SimpleFragListBuilder();
FieldFragList ffl = sflb.createFieldFragList( fpl, 100 );
assertEquals( 1, ffl.fragInfos.size() );
assertEquals( "subInfos=(d((9,10)))/1.0(3,103)", ffl.fragInfos.get( 0 ).toString() );
}
|
public void test1PhraseShortMV() throws Exception {
makeIndexShortMV();
FieldQuery fq = new FieldQuery( tq( "d" ), true, true );
FieldTermStack stack = new FieldTermStack( reader, 0, F, fq );
assertEquals( 1, stack.termList.size() );
assertEquals( "d(6,7,3)", stack.pop().toString() );
}
| public void test1PhraseShortMV() throws Exception {
makeIndexShortMV();
FieldQuery fq = new FieldQuery( tq( "d" ), true, true );
FieldTermStack stack = new FieldTermStack( reader, 0, F, fq );
assertEquals( 1, stack.termList.size() );
assertEquals( "d(9,10,3)", stack.pop().toString() );
}
|
public void test1PhraseShortMV() throws Exception {
makeIndexShortMV();
FieldQuery fq = new FieldQuery( tq( "d" ), true, true );
FieldTermStack stack = new FieldTermStack( reader, 0, F, fq );
FieldPhraseList fpl = new FieldPhraseList( stack, fq );
assertEquals( 1, fpl.phraseList.size() );
assertEquals( "d(1.0)((6,7))", fpl.phraseList.get( 0 ).toString() );
}
| public void test1PhraseShortMV() throws Exception {
makeIndexShortMV();
FieldQuery fq = new FieldQuery( tq( "d" ), true, true );
FieldTermStack stack = new FieldTermStack( reader, 0, F, fq );
FieldPhraseList fpl = new FieldPhraseList( stack, fq );
assertEquals( 1, fpl.phraseList.size() );
assertEquals( "d(1.0)((9,10))", fpl.phraseList.get( 0 ).toString() );
}
|
private void unCache(String fileName) throws IOException {
// Only let one thread uncache at a time; this only
// happens during commit() or close():
synchronized(uncacheLock) {
if (VERBOSE) {
System.out.println("nrtdir.unCache name=" + fileName);
}
if (!cache.fileExists(fileName)) {
// Another thread beat us...
return;
}
if (delegate.fileExists(fileName)) {
throw new IOException("cannot uncache file=\"" + fileName + "\": it was separately also created in the delegate directory");
}
final IOContext context = IOContext.DEFAULT;
final IndexOutput out = delegate.createOutput(fileName, context);
IndexInput in = null;
try {
in = cache.openInput(fileName, context);
in.copyBytes(out, in.length());
} finally {
IOUtils.close(in, out);
}
// Lock order: uncacheLock -> this
synchronized(this) {
// Must sync here because other sync methods have
// if (cache.fileExists(name)) { ... } else { ... }:
cache.deleteFile(fileName);
}
}
}
| private void unCache(String fileName) throws IOException {
// Only let one thread uncache at a time; this only
// happens during commit() or close():
synchronized(uncacheLock) {
if (VERBOSE) {
System.out.println("nrtdir.unCache name=" + fileName);
}
if (!cache.fileExists(fileName)) {
// Another thread beat us...
return;
}
if (delegate.fileExists(fileName)) {
throw new IOException("cannot uncache file=\"" + fileName + "\": it was separately also created in the delegate directory");
}
final IOContext context = IOContext.DEFAULT;
final IndexOutput out = delegate.createOutput(fileName, context);
IndexInput in = null;
try {
in = cache.openInput(fileName, context);
out.copyBytes(in, in.length());
} finally {
IOUtils.close(in, out);
}
// Lock order: uncacheLock -> this
synchronized(this) {
// Must sync here because other sync methods have
// if (cache.fileExists(name)) { ... } else { ... }:
cache.deleteFile(fileName);
}
}
}
|
public void testDerby3000() throws SQLException, IOException {
ResultSet rs;
// Derby-3000 make sure we process only valid TableType values and
// process them correctly.
DatabaseMetaData dmd = getConnection().getMetaData();
Statement s = createStatement();
s.executeUpdate("CREATE TABLE APP.TAB (i int)");
s.executeUpdate("CREATE VIEW APP.V as SELECT * FROM TAB");
s.executeUpdate("CREATE SYNONYM TSYN FOR APP.TAB");
String[] withInvalidTableTypes = {"SYNONYM","TABLE","VIEW",
"GLOBAL TEMPORARY"};
// just ignore invalid types
rs = dmd.getTables( "%", "%", "%", withInvalidTableTypes);
JDBC.assertFullResultSet(rs,
new String[][] {{"","APP","TSYN","SYNONYM","",null,null,null,null,null},
{"","APP","TAB","TABLE","",null,null,null,null,null},
{"","APP","V","VIEW","",null,null,null,null,null}});
rs = dmd.getTables("%", "%", "%", new String[] {"GLOBAL TEMPORARY"});
JDBC.assertEmpty(rs);
rs = dmd.getTables("%", "%", "%", new String[] {"VIEW"});
JDBC.assertUnorderedResultSet(rs, new String[][]
{{"","APP","V","VIEW","",null,null,null,null,null}});
rs = dmd.getTables("%", "%", "%", new String[] {"TABLE"});
JDBC.assertUnorderedResultSet(rs,new String[][]
{{"","APP","TAB","TABLE","",null,null,null,null,null}} );
rs = dmd.getTables("%", "%", "%", new String[] {"SYNONYM"});
JDBC.assertUnorderedResultSet(rs, new String[][]
{{"","APP","TSYN","SYNONYM","",null,null,null,null,null}});
rs = dmd.getTables( "%", "%", "%", new String[] {"SYSTEM TABLE"});
assertEquals(19, JDBC.assertDrainResults(rs));
s.executeUpdate("DROP VIEW APP.V");
s.executeUpdate("DROP TABLE APP.TAB");
s.executeUpdate("DROP SYNONYM APP.TSYN");
}
| public void testDerby3000() throws SQLException, IOException {
ResultSet rs;
// Derby-3000 make sure we process only valid TableType values and
// process them correctly.
DatabaseMetaData dmd = getConnection().getMetaData();
Statement s = createStatement();
s.executeUpdate("CREATE TABLE APP.TAB (i int)");
s.executeUpdate("CREATE VIEW APP.V as SELECT * FROM TAB");
s.executeUpdate("CREATE SYNONYM TSYN FOR APP.TAB");
String[] withInvalidTableTypes = {"SYNONYM","TABLE","VIEW",
"GLOBAL TEMPORARY"};
// just ignore invalid types
rs = dmd.getTables( "%", "%", "%", withInvalidTableTypes);
JDBC.assertFullResultSet(rs,
new String[][] {{"","APP","TSYN","SYNONYM","",null,null,null,null,null},
{"","APP","TAB","TABLE","",null,null,null,null,null},
{"","APP","V","VIEW","",null,null,null,null,null}});
rs = dmd.getTables("%", "%", "%", new String[] {"GLOBAL TEMPORARY"});
JDBC.assertEmpty(rs);
rs = dmd.getTables("%", "%", "%", new String[] {"VIEW"});
JDBC.assertUnorderedResultSet(rs, new String[][]
{{"","APP","V","VIEW","",null,null,null,null,null}});
rs = dmd.getTables("%", "%", "%", new String[] {"TABLE"});
JDBC.assertUnorderedResultSet(rs,new String[][]
{{"","APP","TAB","TABLE","",null,null,null,null,null}} );
rs = dmd.getTables("%", "%", "%", new String[] {"SYNONYM"});
JDBC.assertUnorderedResultSet(rs, new String[][]
{{"","APP","TSYN","SYNONYM","",null,null,null,null,null}});
rs = dmd.getTables( "%", "%", "%", new String[] {"SYSTEM TABLE"});
assertEquals(20, JDBC.assertDrainResults(rs));
s.executeUpdate("DROP VIEW APP.V");
s.executeUpdate("DROP TABLE APP.TAB");
s.executeUpdate("DROP SYNONYM APP.TSYN");
}
|
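Editorial note on the pair above: only the expected system-table count changes (19 to 20); the API being exercised is java.sql.DatabaseMetaData.getTables, which filters by table type. A small, self-contained sketch of that call (the Derby in-memory URL is an assumption; any JDBC driver behaves the same way):

import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;

// Lists tables and views reported by the driver's metadata, restricted by type.
public class ListTablesSketch {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:derby:memory:demo;create=true")) {
            DatabaseMetaData dmd = conn.getMetaData();
            try (ResultSet rs = dmd.getTables(null, "%", "%", new String[] {"TABLE", "VIEW"})) {
                while (rs.next()) {
                    System.out.println(rs.getString("TABLE_SCHEM") + "."
                            + rs.getString("TABLE_NAME") + " (" + rs.getString("TABLE_TYPE") + ")");
                }
            }
        }
    }
}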
public void testClobCreateLocatorSP() throws SQLException {
//initialize the locator to a default value.
int locator = -1;
//call the stored procedure to return the created locator.
CallableStatement cs = prepareCall
("? = CALL SYSIBM.CLOBCREATELOCATOR()");
cs.registerOutParameter(1, java.sql.Types.INTEGER);
cs.executeUpdate();
locator = cs.getInt(1);
        //verify that the locator returned and the expected value are equal.
//remember in setup a locator is already created
//hence expected value is 2
assertEquals("The locator values returned by " +
"SYSIBM.CLOBCREATELOCATOR() are incorrect", 4, locator);
cs.close();
}
| public void testClobCreateLocatorSP() throws SQLException {
//initialize the locator to a default value.
int locator = -1;
//call the stored procedure to return the created locator.
CallableStatement cs = prepareCall
("? = CALL SYSIBM.CLOBCREATELOCATOR()");
cs.registerOutParameter(1, java.sql.Types.INTEGER);
cs.executeUpdate();
locator = cs.getInt(1);
        //verify that the locator returned and the expected value are equal.
//remember in setup a locator is already created
//hence expected value is 2
assertEquals("The locator values returned by " +
"SYSIBM.CLOBCREATELOCATOR() are incorrect", 2, locator);
cs.close();
}
|
public void testFragmentCreation() throws Exception {
Bundle exportBundle = makeBundleWithExports("export.bundle", "1.2.3",
"export.package;version=\"1.0.0\";singleton:=true");
Dictionary fragmentHeaders = makeFragmentFromExportBundle(exportBundle)
.getHeaders();
assertNotNull("No headers in the fragment", fragmentHeaders);
assertEquals("Wrong symbolicName", "scooby.doo.test.fragment",
fragmentHeaders.get(Constants.BUNDLE_SYMBOLICNAME));
assertEquals("Wrong version", "0.0.0", fragmentHeaders
.get(Constants.BUNDLE_VERSION));
assertEquals("Wrong Bundle manifest version", "2", fragmentHeaders
.get(Constants.BUNDLE_MANIFESTVERSION));
assertEquals("Wrong Fragment host",
"scooby.doo;bundle-version=\"0.0.0\"", fragmentHeaders
.get(Constants.FRAGMENT_HOST));
assertEquals(
"Wrong Imports",
"export.package;version=\"1.0.0\";bundle-symbolic-name=\"export.bundle\";bundle-version=\"[1.2.3,1.2.3]\"",
fragmentHeaders.get(Constants.IMPORT_PACKAGE));
}
| public void testFragmentCreation() throws Exception {
Bundle exportBundle = makeBundleWithExports("export.bundle", "1.2.3",
"export.package;version=\"1.0.0\";uses:=\"foo.jar,bar.jar\";singleton:=true");
Dictionary fragmentHeaders = makeFragmentFromExportBundle(exportBundle)
.getHeaders();
assertNotNull("No headers in the fragment", fragmentHeaders);
assertEquals("Wrong symbolicName", "scooby.doo.test.fragment",
fragmentHeaders.get(Constants.BUNDLE_SYMBOLICNAME));
assertEquals("Wrong version", "0.0.0", fragmentHeaders
.get(Constants.BUNDLE_VERSION));
assertEquals("Wrong Bundle manifest version", "2", fragmentHeaders
.get(Constants.BUNDLE_MANIFESTVERSION));
assertEquals("Wrong Fragment host",
"scooby.doo;bundle-version=\"0.0.0\"", fragmentHeaders
.get(Constants.FRAGMENT_HOST));
assertEquals(
"Wrong Imports",
"export.package;version=\"1.0.0\";bundle-symbolic-name=\"export.bundle\";bundle-version=\"[1.2.3,1.2.3]\"",
fragmentHeaders.get(Constants.IMPORT_PACKAGE));
}
|
public static String docValuesId(String segmentsName, int fieldId) {
return segmentsName + "-" + fieldId;
}
| public static String docValuesId(String segmentsName, int fieldId) {
return segmentsName + "_" + fieldId;
}
|
private boolean[] expandBooleanArray(boolean[] array, int newLength) {
if (array == null) {
boolean[] newArray = new boolean[newLength];
return newArray;
}
if (array.length < newLength) {
boolean[] newArray = new boolean[newLength];
System.arraycopy(array, 0, newArray, 0, array.length);
return newArray;
}
return array;
}
void flowPrepareForSelectFromInsert() throws SqlException {
agent_.beginWriteChain(this);
writePrepareDescribeInputOutput(constructSelectFromInsertSQL(sql_), section_);
agent_.flow(this);
readPrepareDescribeInputOutput();
agent_.endReadChain();
}
void writePrepareDescribeInputOutput(String sql,
Section section) throws SqlException {
// Notice that sql_ is passed in since in general ad hoc sql must be passed in for unprepared statements
writePrepareDescribeOutput(sql, section);
writeDescribeInput(section);
}
void flowPrepareDescribeInputOutput() throws SqlException {
agent_.beginWriteChain(this);
if (sqlMode_ == isCall__) {
writePrepareDescribeInput();
agent_.flow(this);
readPrepareDescribeInput();
agent_.endReadChain();
} else {
writePrepareDescribeInputOutput();
agent_.flow(this);
readPrepareDescribeInputOutput();
agent_.endReadChain();
}
}
void flowExecute(int executeType) throws SqlException {
super.checkForClosedStatement();
super.clearWarningsX();
super.checkForAppropriateSqlMode(executeType, sqlMode_);
checkThatAllParametersAreSet();
if (sqlMode_ == isUpdate__) {
updateCount_ = 0;
} else {
updateCount_ = -1;
}
java.util.Timer queryTimer = null;
QueryTimerTask queryTimerTask = null;
if (timeout_ != 0) {
queryTimer = new java.util.Timer(); // A thread that ticks the seconds
queryTimerTask = new QueryTimerTask(this, queryTimer);
queryTimer.schedule(queryTimerTask, 1000 * timeout_);
}
try {
agent_.beginWriteChain(this);
boolean piggybackedAutocommit = super.writeCloseResultSets(true); // true means permit auto-commits
int numInputColumns = (parameterMetaData_ != null) ? parameterMetaData_.getColumnCount() : 0;
boolean outputExpected = (resultSetMetaData_ != null && resultSetMetaData_.getColumnCount() > 0);
boolean chainAutoCommit = false;
boolean commitSubstituted = false;
boolean repositionedCursor = false;
ResultSet scrollableRS = null;
switch (sqlMode_) {
case isUpdate__:
if (positionedUpdateCursorName_ != null) {
scrollableRS = agent_.sectionManager_.getPositionedUpdateResultSet(positionedUpdateCursorName_);
}
if (scrollableRS != null && !scrollableRS.isRowsetCursor_) {
repositionedCursor =
scrollableRS.repositionScrollableResultSetBeforeJDBC1PositionedUpdateDelete();
if (!repositionedCursor) {
scrollableRS = null;
}
}
chainAutoCommit = connection_.willAutoCommitGenerateFlow() && isAutoCommittableStatement_;
if (sqlUpdateMode_ == isInsertSql__ && generatedKeysColumnNames_ != null) {
writeOpenQuery(section_,
fetchSize_,
resultSetType_,
numInputColumns,
parameterMetaData_,
parameters_);
} else {
boolean chainOpenQueryForAutoGeneratedKeys = (sqlUpdateMode_ == isInsertSql__ && autoGeneratedKeys_ == RETURN_GENERATED_KEYS);
writeExecute(section_,
parameterMetaData_,
parameters_,
numInputColumns,
outputExpected,
(chainAutoCommit || chainOpenQueryForAutoGeneratedKeys)// chain flag
); // chain flag
if (chainOpenQueryForAutoGeneratedKeys) {
prepareAutoGeneratedKeysStatement();
writeOpenQuery(preparedStatementForAutoGeneratedKeys_.section_,
preparedStatementForAutoGeneratedKeys_.fetchSize_,
preparedStatementForAutoGeneratedKeys_.resultSetType_);
}
}
if (chainAutoCommit) {
// we have encountered an error in writing the execute, so do not
// flow an autocommit
if (agent_.accumulatedReadExceptions_ != null) {
// currently, the only write exception we encounter is for
// data truncation: SQLSTATE 01004, so we don't bother checking for this
connection_.writeCommitSubstitute_();
commitSubstituted = true;
} else {
// there is no write error, so flow the commit
connection_.writeCommit();
}
}
break;
case isQuery__:
writeOpenQuery(section_,
fetchSize_,
resultSetType_,
numInputColumns,
parameterMetaData_,
parameters_);
break;
case isCall__:
writeExecuteCall(outputRegistered_, // if no out/inout parameter, outputExpected = false
null,
section_,
fetchSize_,
false, // do not suppress ResultSets for regular CALLs
resultSetType_,
parameterMetaData_,
parameters_); // cross conversion
break;
}
agent_.flow(this);
super.readCloseResultSets(true); // true means permit auto-commits
// turn inUnitOfWork_ flag back on and add statement
// back on commitListeners_ list if they were off
// by an autocommit chained to a close cursor.
if (piggybackedAutocommit) {
connection_.completeTransactionStart();
}
super.markResultSetsClosed();
switch (sqlMode_) {
case isUpdate__:
// do not need to reposition for a rowset cursor
if (scrollableRS != null && !scrollableRS.isRowsetCursor_) {
scrollableRS.readPositioningFetch_();
}
if (sqlUpdateMode_ == isInsertSql__ && generatedKeysColumnNames_ != null) {
readOpenQuery();
if (resultSet_ != null) {
generatedKeysResultSet_ = resultSet_;
resultSet_ = null;
updateCount_ = 1;
}
} else {
readExecute();
if (sqlUpdateMode_ == isInsertSql__ && autoGeneratedKeys_ == RETURN_GENERATED_KEYS) {
readPrepareAutoGeneratedKeysStatement();
preparedStatementForAutoGeneratedKeys_.readOpenQuery();
generatedKeysResultSet_ = preparedStatementForAutoGeneratedKeys_.resultSet_;
preparedStatementForAutoGeneratedKeys_.resultSet_ = null;
}
}
if (chainAutoCommit) {
if (commitSubstituted) {
connection_.readCommitSubstitute_();
} else {
connection_.readCommit();
}
}
break;
case isQuery__:
try {
readOpenQuery();
} catch (DisconnectException dise) {
throw dise;
} catch (SqlException e) {
throw e;
}
// resultSet_ is null if open query failed.
// check for null resultSet_ before using it.
if (resultSet_ != null) {
resultSet_.parseScrollableRowset();
//if (resultSet_.scrollable_) resultSet_.getRowCount();
// If client's cursor name is set, map the client's cursor name to the ResultSet
// Else map the server's cursor name to the ResultSet
mapCursorNameToResultSet();
}
break;
case isCall__:
readExecuteCall();
break;
}
try {
agent_.endReadChain();
} catch (SqlException e) {
throw e;
}
if (sqlMode_ == isCall__) {
parseStorProcReturnedScrollableRowset();
// When there are no result sets back, we will commit immediately when autocommit is true.
// make sure a commit is not performed when making the call to the sqlca message procedure
if (connection_.autoCommit_ && resultSet_ == null && resultSetList_ == null && isAutoCommittableStatement_) {
connection_.flowAutoCommit();
}
}
// Throw an exception if holdability returned by the server is different from requested.
if (resultSet_ != null && resultSet_.resultSetHoldability_ != resultSetHoldability_ && sqlMode_ != isCall__) {
throw new SqlException(agent_.logWriter_, "Unable to open resultSet with requested " +
"holdability " + resultSetHoldability_ + ".");
}
} finally {
if (timeout_ != 0) { // query timers need to be cancelled.
queryTimer.cancel();
queryTimerTask.cancel();
}
}
}
| private boolean[] expandBooleanArray(boolean[] array, int newLength) {
if (array == null) {
boolean[] newArray = new boolean[newLength];
return newArray;
}
if (array.length < newLength) {
boolean[] newArray = new boolean[newLength];
System.arraycopy(array, 0, newArray, 0, array.length);
return newArray;
}
return array;
}
void flowPrepareForSelectFromInsert() throws SqlException {
agent_.beginWriteChain(this);
writePrepareDescribeInputOutput(constructSelectFromInsertSQL(sql_), section_);
agent_.flow(this);
readPrepareDescribeInputOutput();
agent_.endReadChain();
}
void writePrepareDescribeInputOutput(String sql,
Section section) throws SqlException {
// Notice that sql_ is passed in since in general ad hoc sql must be passed in for unprepared statements
writePrepareDescribeOutput(sql, section);
writeDescribeInput(section);
}
void flowPrepareDescribeInputOutput() throws SqlException {
agent_.beginWriteChain(this);
if (sqlMode_ == isCall__) {
writePrepareDescribeInput();
agent_.flow(this);
readPrepareDescribeInput();
agent_.endReadChain();
} else {
writePrepareDescribeInputOutput();
agent_.flow(this);
readPrepareDescribeInputOutput();
agent_.endReadChain();
}
}
void flowExecute(int executeType) throws SqlException {
super.checkForClosedStatement();
super.clearWarningsX();
super.checkForAppropriateSqlMode(executeType, sqlMode_);
checkThatAllParametersAreSet();
if (sqlMode_ == isUpdate__) {
updateCount_ = 0;
} else {
updateCount_ = -1;
}
java.util.Timer queryTimer = null;
QueryTimerTask queryTimerTask = null;
if (timeout_ != 0) {
queryTimer = new java.util.Timer(); // A thread that ticks the seconds
queryTimerTask = new QueryTimerTask(this, queryTimer);
queryTimer.schedule(queryTimerTask, 1000 * timeout_);
}
try {
agent_.beginWriteChain(this);
boolean piggybackedAutocommit = super.writeCloseResultSets(true); // true means permit auto-commits
int numInputColumns = (parameterMetaData_ != null) ? parameterMetaData_.getColumnCount() : 0;
boolean outputExpected = (resultSetMetaData_ != null && resultSetMetaData_.getColumnCount() > 0);
boolean chainAutoCommit = false;
boolean commitSubstituted = false;
boolean repositionedCursor = false;
ResultSet scrollableRS = null;
switch (sqlMode_) {
case isUpdate__:
if (positionedUpdateCursorName_ != null) {
scrollableRS = agent_.sectionManager_.getPositionedUpdateResultSet(positionedUpdateCursorName_);
}
if (scrollableRS != null && !scrollableRS.isRowsetCursor_) {
repositionedCursor =
scrollableRS.repositionScrollableResultSetBeforeJDBC1PositionedUpdateDelete();
if (!repositionedCursor) {
scrollableRS = null;
}
}
chainAutoCommit = connection_.willAutoCommitGenerateFlow() && isAutoCommittableStatement_;
if (sqlUpdateMode_ == isInsertSql__ && generatedKeysColumnNames_ != null) {
writeOpenQuery(section_,
fetchSize_,
resultSetType_,
numInputColumns,
parameterMetaData_,
parameters_);
} else {
boolean chainOpenQueryForAutoGeneratedKeys = (sqlUpdateMode_ == isInsertSql__ && autoGeneratedKeys_ == RETURN_GENERATED_KEYS);
writeExecute(section_,
parameterMetaData_,
parameters_,
numInputColumns,
outputExpected,
(chainAutoCommit || chainOpenQueryForAutoGeneratedKeys)// chain flag
); // chain flag
if (chainOpenQueryForAutoGeneratedKeys) {
prepareAutoGeneratedKeysStatement();
writeOpenQuery(preparedStatementForAutoGeneratedKeys_.section_,
preparedStatementForAutoGeneratedKeys_.fetchSize_,
preparedStatementForAutoGeneratedKeys_.resultSetType_);
}
}
if (chainAutoCommit) {
// we have encountered an error in writing the execute, so do not
// flow an autocommit
if (agent_.accumulatedReadExceptions_ != null) {
// currently, the only write exception we encounter is for
// data truncation: SQLSTATE 01004, so we don't bother checking for this
connection_.writeCommitSubstitute_();
commitSubstituted = true;
} else {
// there is no write error, so flow the commit
connection_.writeCommit();
}
}
break;
case isQuery__:
writeOpenQuery(section_,
fetchSize_,
resultSetType_,
numInputColumns,
parameterMetaData_,
parameters_);
break;
case isCall__:
writeExecuteCall(outputRegistered_, // if no out/inout parameter, outputExpected = false
null,
section_,
fetchSize_,
false, // do not suppress ResultSets for regular CALLs
resultSetType_,
parameterMetaData_,
parameters_); // cross conversion
break;
}
agent_.flow(this);
super.readCloseResultSets(true); // true means permit auto-commits
// turn inUnitOfWork_ flag back on and add statement
// back on commitListeners_ list if they were off
// by an autocommit chained to a close cursor.
if (piggybackedAutocommit) {
connection_.completeTransactionStart();
}
super.markResultSetsClosed(true); // true means remove from list of commit and rollback listeners
switch (sqlMode_) {
case isUpdate__:
// do not need to reposition for a rowset cursor
if (scrollableRS != null && !scrollableRS.isRowsetCursor_) {
scrollableRS.readPositioningFetch_();
}
if (sqlUpdateMode_ == isInsertSql__ && generatedKeysColumnNames_ != null) {
readOpenQuery();
if (resultSet_ != null) {
generatedKeysResultSet_ = resultSet_;
resultSet_ = null;
updateCount_ = 1;
}
} else {
readExecute();
if (sqlUpdateMode_ == isInsertSql__ && autoGeneratedKeys_ == RETURN_GENERATED_KEYS) {
readPrepareAutoGeneratedKeysStatement();
preparedStatementForAutoGeneratedKeys_.readOpenQuery();
generatedKeysResultSet_ = preparedStatementForAutoGeneratedKeys_.resultSet_;
preparedStatementForAutoGeneratedKeys_.resultSet_ = null;
}
}
if (chainAutoCommit) {
if (commitSubstituted) {
connection_.readCommitSubstitute_();
} else {
connection_.readCommit();
}
}
break;
case isQuery__:
try {
readOpenQuery();
} catch (DisconnectException dise) {
throw dise;
} catch (SqlException e) {
throw e;
}
// resultSet_ is null if open query failed.
// check for null resultSet_ before using it.
if (resultSet_ != null) {
resultSet_.parseScrollableRowset();
//if (resultSet_.scrollable_) resultSet_.getRowCount();
// If client's cursor name is set, map the client's cursor name to the ResultSet
// Else map the server's cursor name to the ResultSet
mapCursorNameToResultSet();
}
break;
case isCall__:
readExecuteCall();
break;
}
try {
agent_.endReadChain();
} catch (SqlException e) {
throw e;
}
if (sqlMode_ == isCall__) {
parseStorProcReturnedScrollableRowset();
// When there are no result sets back, we will commit immediately when autocommit is true.
// make sure a commit is not performed when making the call to the sqlca message procedure
if (connection_.autoCommit_ && resultSet_ == null && resultSetList_ == null && isAutoCommittableStatement_) {
connection_.flowAutoCommit();
}
}
// Throw an exception if holdability returned by the server is different from requested.
if (resultSet_ != null && resultSet_.resultSetHoldability_ != resultSetHoldability_ && sqlMode_ != isCall__) {
throw new SqlException(agent_.logWriter_, "Unable to open resultSet with requested " +
"holdability " + resultSetHoldability_ + ".");
}
} finally {
if (timeout_ != 0) { // query timers need to be cancelled.
queryTimer.cancel();
queryTimerTask.cancel();
}
}
}
|
public void distribCommit(CommitUpdateCommand cmd, List<Node> nodes,
ModifiableSolrParams params) throws IOException {
// we need to do any retries before commit...
servers.blockUntilFinished();
doRetriesIfNeeded();
UpdateRequest uReq = new UpdateRequest();
uReq.setParams(params);
addCommit(uReq, cmd);
log.debug("Distrib commit to:" + nodes + " params:" + params);
for (Node node : nodes) {
submit(new Req(cmd.toString(), node, uReq, false));
}
}
| public void distribCommit(CommitUpdateCommand cmd, List<Node> nodes,
ModifiableSolrParams params) throws IOException {
// we need to do any retries before commit...
servers.blockUntilFinished();
doRetriesIfNeeded();
UpdateRequest uReq = new UpdateRequest();
uReq.setParams(params);
addCommit(uReq, cmd);
log.debug("Distrib commit to: {} params: {}", nodes, params);
for (Node node : nodes) {
submit(new Req(cmd.toString(), node, uReq, false));
}
}
|
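Editorial note on the pair above: the fix replaces string concatenation in the debug call with SLF4J's placeholder form, so the message is only assembled when DEBUG is enabled. A minimal sketch (class name is hypothetical; assumes slf4j-api plus a binding on the classpath):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Arrays;
import java.util.List;

// Parameterized logging defers formatting until the level check passes.
public class LoggingSketch {
    private static final Logger log = LoggerFactory.getLogger(LoggingSketch.class);

    public static void main(String[] args) {
        List<String> nodes = Arrays.asList("node1", "node2");
        String params = "commit=true";
        // Eager concatenation builds the string even when DEBUG is off:
        //   log.debug("Distrib commit to:" + nodes + " params:" + params);
        // The placeholder form formats only when DEBUG is enabled:
        log.debug("Distrib commit to: {} params: {}", nodes, params);
    }
}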
public static void validateKeyspaceNotYetExisting(String newKsName) throws InvalidRequestException
{
// keyspace names must be unique case-insensitively because the keyspace name becomes the directory
// where we store CF sstables. Names that differ only in case would thus cause problems on
// case-insensitive filesystems (NTFS, most installations of HFS+).
for (String ksName : DatabaseDescriptor.getTables())
{
if (ksName.equalsIgnoreCase(newKsName))
throw new InvalidRequestException("Keyspace names must be case-insensitively unique");
}
}
| public static void validateKeyspaceNotYetExisting(String newKsName) throws InvalidRequestException
{
// keyspace names must be unique case-insensitively because the keyspace name becomes the directory
// where we store CF sstables. Names that differ only in case would thus cause problems on
// case-insensitive filesystems (NTFS, most installations of HFS+).
for (String ksName : Schema.instance.getTables())
{
if (ksName.equalsIgnoreCase(newKsName))
throw new InvalidRequestException("Keyspace names must be case-insensitively unique");
}
}
|
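Editorial note on the pair above: the comment explains that keyspace names must be unique without regard to case because they become directory names. A dependency-free sketch of the same check using a case-insensitive set (names are made up):

import java.util.Set;
import java.util.TreeSet;

// A TreeSet ordered case-insensitively treats names differing only in case as duplicates.
public class CaseInsensitiveNamesSketch {
    public static void main(String[] args) {
        Set<String> existing = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
        existing.add("Keyspace1");

        String candidate = "KEYSPACE1";
        if (existing.contains(candidate)) {
            System.out.println("Rejected: \"" + candidate + "\" differs only in case from an existing name");
        } else {
            existing.add(candidate);
        }
    }
}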
public void testTriggersWithClobColumn() throws Exception {
insertDefaultData();
Statement stmt = createStatement();
stmt.executeUpdate(
"CREATE TABLE testClobTriggerA (a CLOB(400k), b int)");
stmt.executeUpdate(
"CREATE TABLE testClobTriggerB (a CLOB(400k), b int)");
stmt.executeUpdate(
"create trigger T13A after update on testClob " +
"referencing new as n old as o " +
"for each row "+
"insert into testClobTriggerA(a, b) values (n.a, n.b)");
stmt.executeUpdate(
"create trigger T13B after INSERT on testClobTriggerA " +
"referencing new table as n " +
"for each statement "+
"insert into testClobTriggerB(a, b) select n.a, n.b from n");
commit();
// Fire the triggers
stmt.executeUpdate("UPDATE testClob SET b = b + 0");
commit();
// Verify the results
Statement origSt = createStatement();
Statement trigASt = createStatement();
Statement trigBSt = createStatement();
ResultSet origRS = origSt.executeQuery(
"select a, length(a), b from testClob order by b");
ResultSet trigARS = trigASt.executeQuery(
"select a, length(a), b from testClobTriggerA order by b");
ResultSet trigBRS = trigBSt.executeQuery(
"select a, length(a), b from testClobTriggerA order by b");
int count = 0;
while (origRS.next()) {
count ++;
assertTrue("row trigger produced less rows " +
count, trigARS.next());
assertTrue("statement trigger produced less rows " +
count, trigBRS.next());
if (origRS.getClob(1) != null) {
assertEquals("FAIL - Invalid checksum for row trigger",
getStreamCheckSum(origRS.getClob(1).getAsciiStream()),
getStreamCheckSum(trigARS.getClob(1).getAsciiStream()));
assertEquals("FAIL - Invalid checksum for statement trigger",
getStreamCheckSum(origRS.getClob(1).getAsciiStream()),
getStreamCheckSum(trigBRS.getClob(1).getAsciiStream()));
}
assertEquals("FAIL - Invalid length in row trigger",
origRS.getInt(2), trigARS.getInt(2));
assertEquals("FAIL - Invalid length in statement trigger",
origRS.getInt(2), trigBRS.getInt(2));
assertEquals("FAIL - Length not updated on row trigger",
origRS.getInt(3), trigARS.getInt(3));
assertEquals("FAIL - Length not updated on statement trigger",
origRS.getInt(3), trigBRS.getInt(3));
}
origRS.close();
trigARS.close();
trigBRS.close();
origSt.close();
trigASt.close();
trigBSt.close();
stmt.executeUpdate("DROP TRIGGER T13A");
stmt.executeUpdate("DROP TRIGGER T13B");
stmt.executeUpdate("DROP TABLE testClobTriggerB");
stmt.executeUpdate("DROP TABLE testClobTriggerA");
stmt.close();
commit();
}
| public void testTriggersWithClobColumn() throws Exception {
insertDefaultData();
Statement stmt = createStatement();
stmt.executeUpdate(
"CREATE TABLE testClobTriggerA (a CLOB(400k), b int)");
stmt.executeUpdate(
"CREATE TABLE testClobTriggerB (a CLOB(400k), b int)");
stmt.executeUpdate(
"create trigger T13A after update on testClob " +
"referencing new as n old as o " +
"for each row "+
"insert into testClobTriggerA(a, b) values (n.a, n.b)");
stmt.executeUpdate(
"create trigger T13B after INSERT on testClobTriggerA " +
"referencing new table as n " +
"for each statement "+
"insert into testClobTriggerB(a, b) select n.a, n.b from n");
commit();
// Fire the triggers
stmt.executeUpdate("UPDATE testClob SET b = b + 0");
commit();
// Verify the results
Statement origSt = createStatement();
Statement trigASt = createStatement();
Statement trigBSt = createStatement();
ResultSet origRS = origSt.executeQuery(
"select a, length(a), b from testClob order by b");
ResultSet trigARS = trigASt.executeQuery(
"select a, length(a), b from testClobTriggerA order by b");
ResultSet trigBRS = trigBSt.executeQuery(
"select a, length(a), b from testClobTriggerB order by b");
int count = 0;
while (origRS.next()) {
count ++;
assertTrue("row trigger produced less rows " +
count, trigARS.next());
assertTrue("statement trigger produced less rows " +
count, trigBRS.next());
if (origRS.getClob(1) != null) {
assertEquals("FAIL - Invalid checksum for row trigger",
getStreamCheckSum(origRS.getClob(1).getAsciiStream()),
getStreamCheckSum(trigARS.getClob(1).getAsciiStream()));
assertEquals("FAIL - Invalid checksum for statement trigger",
getStreamCheckSum(origRS.getClob(1).getAsciiStream()),
getStreamCheckSum(trigBRS.getClob(1).getAsciiStream()));
}
assertEquals("FAIL - Invalid length in row trigger",
origRS.getInt(2), trigARS.getInt(2));
assertEquals("FAIL - Invalid length in statement trigger",
origRS.getInt(2), trigBRS.getInt(2));
assertEquals("FAIL - Length not updated on row trigger",
origRS.getInt(3), trigARS.getInt(3));
assertEquals("FAIL - Length not updated on statement trigger",
origRS.getInt(3), trigBRS.getInt(3));
}
origRS.close();
trigARS.close();
trigBRS.close();
origSt.close();
trigASt.close();
trigBSt.close();
stmt.executeUpdate("DROP TRIGGER T13A");
stmt.executeUpdate("DROP TRIGGER T13B");
stmt.executeUpdate("DROP TABLE testClobTriggerB");
stmt.executeUpdate("DROP TABLE testClobTriggerA");
stmt.close();
commit();
}
|
public static Test suite()
{
String testName = "InterruptResilienceTest";
if (! isSunJVM()) {
// DERBY-4463 test fails on IBM VM 1.5.
// It's fixed in IBM VM 1.6 SR9 and above.
// Remove this condition when that issue is solved in IBM VM 1.5 SR13.
if (getSystemProperty("java.version").startsWith("1.5.0"))
{
println("Test skipped for this VM, cf. DERBY-4463");
return new TestSuite(testName);
}
}
if (!JDBC.vmSupportsJDBC3()) {
println("Test skipped for this VM, " +
"DriverManager is not supported with JSR169");
return new TestSuite(testName);
}
if (hasInterruptibleIO()) {
println("Test skipped due to interruptible IO.");
println("This is default on Solaris/Sun Java <= 1.6, use " +
"-XX:-UseVMInterruptibleIO if available.");
return new TestSuite(testName);
}
return makeSuite(testName);
}
| public static Test suite()
{
String testName = "InterruptResilienceTest";
if (isIBMJVM()) {
// DERBY-4463 test fails on IBM VM 1.5.
// It's fixed in IBM VM 1.6 SR9 and above.
// Remove this condition when that issue is solved in IBM VM 1.5 SR13.
if (getSystemProperty("java.version").startsWith("1.5.0"))
{
println("Test skipped for this VM, cf. DERBY-4463");
return new TestSuite(testName);
}
}
if (!JDBC.vmSupportsJDBC3()) {
println("Test skipped for this VM, " +
"DriverManager is not supported with JSR169");
return new TestSuite(testName);
}
if (hasInterruptibleIO()) {
println("Test skipped due to interruptible IO.");
println("This is default on Solaris/Sun Java <= 1.6, use " +
"-XX:-UseVMInterruptibleIO if available.");
return new TestSuite(testName);
}
return makeSuite(testName);
}
|
public static Test suite() {
if (! isSunJVM()) {
// DERBY-4463 test fails on IBM VM 1.5.
// It's fixed in IBM VM 1.6 SR9 and above.
// Remove this condition when that issue is solved in IBM VM 1.5 SR13.
if (getSystemProperty("java.version").startsWith("1.5.0"))
{
println("Test skipped for this VM, cf. DERBY-4463");
return new TestSuite("empty LockInterruptTest");
}
}
// Only run in embedded mode since we cannot interrupt the engine
// thread from the network client.
Test test = TestConfiguration.embeddedSuite(LockInterruptTest.class);
// Set the lock timeout to a known value so that we know what to
// expect for timeouts.
test = DatabasePropertyTestSetup.setLockTimeouts(
test, DEADLOCK_TIMEOUT, LOCK_TIMEOUT);
Properties syspros = new Properties();
        //Derby-4856: interrupt errors create a thread dump and diagnostic
        //info. Add a property to suppress that output.
syspros.put("derby.stream.error.extendedDiagSeverityLevel", "50000");
test = new SystemPropertyTestSetup(test, syspros, true);
return new CleanDatabaseTestSetup(test);
}
| public static Test suite() {
if (isIBMJVM()) {
// DERBY-4463 test fails on IBM VM 1.5.
// It's fixed in IBM VM 1.6 SR9 and above.
// Remove this condition when that issue is solved in IBM VM 1.5 SR13.
if (getSystemProperty("java.version").startsWith("1.5.0"))
{
println("Test skipped for this VM, cf. DERBY-4463");
return new TestSuite("empty LockInterruptTest");
}
}
// Only run in embedded mode since we cannot interrupt the engine
// thread from the network client.
Test test = TestConfiguration.embeddedSuite(LockInterruptTest.class);
// Set the lock timeout to a known value so that we know what to
// expect for timeouts.
test = DatabasePropertyTestSetup.setLockTimeouts(
test, DEADLOCK_TIMEOUT, LOCK_TIMEOUT);
Properties syspros = new Properties();
        //Derby-4856: interrupt errors create a thread dump and diagnostic
        //info. Add a property to suppress that output.
syspros.put("derby.stream.error.extendedDiagSeverityLevel", "50000");
test = new SystemPropertyTestSetup(test, syspros, true);
return new CleanDatabaseTestSetup(test);
}
|
public void map(LongWritable userID,
VectorWritable vectorWritable,
OutputCollector<LongWritable, RecommendedItemsWritable> output,
Reporter reporter) throws IOException {
if (usersToRecommendFor != null && !usersToRecommendFor.contains(userID.get())) {
return;
}
Vector userVector = vectorWritable.get();
Iterator<Vector.Element> userVectorIterator = userVector.iterateNonZero();
Vector recommendationVector = new RandomAccessSparseVector(Integer.MAX_VALUE, 1000);
while (userVectorIterator.hasNext()) {
Vector.Element element = userVectorIterator.next();
int index = element.index();
double value = element.get();
Vector columnVector;
try {
columnVector = cooccurrenceColumnCache.get(new IntWritable(index));
} catch (TasteException te) {
if (te.getCause() instanceof IOException) {
throw (IOException) te.getCause();
} else {
throw new IOException(te.getCause());
}
}
columnVector.times(value).addTo(recommendationVector);
}
Queue<RecommendedItem> topItems =
new PriorityQueue<RecommendedItem>(recommendationsPerUser + 1, Collections.reverseOrder());
Iterator<Vector.Element> recommendationVectorIterator = recommendationVector.iterateNonZero();
LongWritable itemID = new LongWritable();
while (recommendationVectorIterator.hasNext()) {
Vector.Element element = recommendationVectorIterator.next();
int index = element.index();
if (userVector.get(index) != 0.0) {
if (topItems.size() < recommendationsPerUser) {
indexItemIDMap.get(new IntWritable(index), itemID);
topItems.add(new GenericRecommendedItem(itemID.get(), (float) element.get()));
} else if (element.get() > topItems.peek().getValue()) {
indexItemIDMap.get(new IntWritable(index), itemID);
topItems.add(new GenericRecommendedItem(itemID.get(), (float) element.get()));
topItems.poll();
}
}
}
List<RecommendedItem> recommendations = new ArrayList<RecommendedItem>(topItems.size());
recommendations.addAll(topItems);
Collections.sort(recommendations);
output.collect(userID, new RecommendedItemsWritable(recommendations));
}
| public void map(LongWritable userID,
VectorWritable vectorWritable,
OutputCollector<LongWritable, RecommendedItemsWritable> output,
Reporter reporter) throws IOException {
if (usersToRecommendFor != null && !usersToRecommendFor.contains(userID.get())) {
return;
}
Vector userVector = vectorWritable.get();
Iterator<Vector.Element> userVectorIterator = userVector.iterateNonZero();
Vector recommendationVector = new RandomAccessSparseVector(Integer.MAX_VALUE, 1000);
while (userVectorIterator.hasNext()) {
Vector.Element element = userVectorIterator.next();
int index = element.index();
double value = element.get();
Vector columnVector;
try {
columnVector = cooccurrenceColumnCache.get(new IntWritable(index));
} catch (TasteException te) {
if (te.getCause() instanceof IOException) {
throw (IOException) te.getCause();
} else {
throw new IOException(te.getCause());
}
}
columnVector.times(value).addTo(recommendationVector);
}
Queue<RecommendedItem> topItems =
new PriorityQueue<RecommendedItem>(recommendationsPerUser + 1, Collections.reverseOrder());
Iterator<Vector.Element> recommendationVectorIterator = recommendationVector.iterateNonZero();
LongWritable itemID = new LongWritable();
while (recommendationVectorIterator.hasNext()) {
Vector.Element element = recommendationVectorIterator.next();
int index = element.index();
if (userVector.get(index) == 0.0) {
if (topItems.size() < recommendationsPerUser) {
indexItemIDMap.get(new IntWritable(index), itemID);
topItems.add(new GenericRecommendedItem(itemID.get(), (float) element.get()));
} else if (element.get() > topItems.peek().getValue()) {
indexItemIDMap.get(new IntWritable(index), itemID);
topItems.add(new GenericRecommendedItem(itemID.get(), (float) element.get()));
topItems.poll();
}
}
}
List<RecommendedItem> recommendations = new ArrayList<RecommendedItem>(topItems.size());
recommendations.addAll(topItems);
Collections.sort(recommendations);
output.collect(userID, new RecommendedItemsWritable(recommendations));
}
|
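Editorial note on the pair above: besides the flipped == 0.0 test, which appears to restrict recommendations to items the user has not already rated, the mapper keeps the best N items in a bounded priority queue. The production code orders RecommendedItem objects with a comparator; the JDK-only sketch below shows the same bounded top-N idea with plain doubles and a natural-order min-heap:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.PriorityQueue;

// Keep a min-heap of at most n scores; when a new score beats the current
// minimum, add it and evict the weakest entry.
public class TopNSketch {
    public static void main(String[] args) {
        int n = 3;
        double[] scores = {0.4, 0.9, 0.1, 0.7, 0.95, 0.2};

        PriorityQueue<Double> topN = new PriorityQueue<>(n + 1); // smallest score at the head
        for (double s : scores) {
            if (topN.size() < n) {
                topN.add(s);
            } else if (s > topN.peek()) {
                topN.add(s);
                topN.poll(); // drop the weakest of the n + 1 candidates
            }
        }

        List<Double> best = new ArrayList<>(topN);
        Collections.sort(best, Collections.reverseOrder());
        System.out.println(best); // prints [0.95, 0.9, 0.7]
    }
}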
public int docID() {
return docIt >= upto ? NO_MORE_DOCS : docs[docIt];
}
| public int docID() {
return docIt < 0 ? -1 : docIt >= upto ? NO_MORE_DOCS : docs[docIt];
}
|
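Editorial note on the pair above: the fix makes docID() report -1 until the iterator has been advanced, which matches Lucene's documented DocIdSetIterator convention as far as I can tell (-1 before the first nextDoc()/advance(), a sentinel once exhausted). A dependency-free sketch of an iterator honoring that convention (all names are hypothetical):

// docID() is -1 before the first advance and NO_MORE_DOCS once exhausted.
public class DocIteratorSketch {
    static final int NO_MORE_DOCS = Integer.MAX_VALUE;

    private final int[] docs;
    private int cursor = -1; // -1 means "not positioned yet"

    DocIteratorSketch(int[] docs) {
        this.docs = docs;
    }

    int docID() {
        if (cursor < 0) {
            return -1;
        }
        return cursor >= docs.length ? NO_MORE_DOCS : docs[cursor];
    }

    int nextDoc() {
        cursor++;
        return docID();
    }

    public static void main(String[] args) {
        DocIteratorSketch it = new DocIteratorSketch(new int[] {2, 5, 8});
        System.out.println(it.docID()); // -1 before the first advance
        while (it.nextDoc() != NO_MORE_DOCS) {
            System.out.println(it.docID()); // 2, 5, 8
        }
        System.out.println(it.docID() == NO_MORE_DOCS); // true after exhaustion
    }
}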
private boolean mergeClosestClusters(int numUsers, List<FastIDSet> clusters, boolean done) throws TasteException {
// We find a certain number of closest clusters...
List<ClusterClusterPair> queue = findClosestClusters(numUsers, clusters);
// The first one is definitely the closest pair in existence so we can cluster
// the two together, put it back into the set of clusters, and start again. Instead
// we assume everything else in our list of closest cluster pairs is still pretty good,
// and we cluster them too.
while (!queue.isEmpty()) {
if (!clusteringByThreshold && clusters.size() <= numClusters) {
done = true;
break;
}
ClusterClusterPair top = queue.remove(0);
if (clusteringByThreshold && top.getSimilarity() < clusteringThreshold) {
done = true;
break;
}
FastIDSet cluster1 = top.getCluster1();
FastIDSet cluster2 = top.getCluster2();
// Pull out current two clusters from clusters
Iterator<FastIDSet> clusterIterator = clusters.iterator();
boolean removed1 = false;
boolean removed2 = false;
while (clusterIterator.hasNext() && !(removed1 && removed2)) {
FastIDSet current = clusterIterator.next();
// Yes, use == here
if (!removed1 && cluster1 == current) {
clusterIterator.remove();
removed1 = true;
} else if (!removed2 && cluster2 == current) {
clusterIterator.remove();
removed2 = true;
}
}
      // The only catch is if a cluster showed up twice in the list of best cluster pairs;
// have to remove the others. Pull out anything referencing these clusters from queue
for (Iterator<ClusterClusterPair> queueIterator = queue.iterator(); queueIterator.hasNext();) {
ClusterClusterPair pair = queueIterator.next();
FastIDSet pair1 = pair.getCluster1();
FastIDSet pair2 = pair.getCluster2();
if (pair1 == cluster1 || pair1 == cluster2 || pair2 == cluster1 || pair2 == cluster2) {
queueIterator.remove();
}
}
// Make new merged cluster
FastIDSet merged = new FastIDSet(cluster1.size() + cluster2.size());
merged.addAll(cluster1);
merged.addAll(cluster2);
// Compare against other clusters; update queue if needed
// That new pair we're just adding might be pretty close to something else, so
// catch that case here and put it back into our queue
for (FastIDSet cluster : clusters) {
double similarity = clusterSimilarity.getSimilarity(merged, cluster);
if (similarity > queue.get(queue.size() - 1).getSimilarity()) {
ListIterator<ClusterClusterPair> queueIterator = queue.listIterator();
while (queueIterator.hasNext()) {
if (similarity > queueIterator.next().getSimilarity()) {
queueIterator.previous();
break;
}
}
queueIterator.add(new ClusterClusterPair(merged, cluster, similarity));
}
}
// Finally add new cluster to list
clusters.add(merged);
}
return done;
}
| private boolean mergeClosestClusters(int numUsers, List<FastIDSet> clusters, boolean done) throws TasteException {
// We find a certain number of closest clusters...
List<ClusterClusterPair> queue = findClosestClusters(numUsers, clusters);
// The first one is definitely the closest pair in existence so we can cluster
// the two together, put it back into the set of clusters, and start again. Instead
// we assume everything else in our list of closest cluster pairs is still pretty good,
// and we cluster them too.
while (!queue.isEmpty()) {
if (!clusteringByThreshold && clusters.size() <= numClusters) {
done = true;
break;
}
ClusterClusterPair top = queue.remove(0);
if (clusteringByThreshold && top.getSimilarity() < clusteringThreshold) {
done = true;
break;
}
FastIDSet cluster1 = top.getCluster1();
FastIDSet cluster2 = top.getCluster2();
// Pull out current two clusters from clusters
Iterator<FastIDSet> clusterIterator = clusters.iterator();
boolean removed1 = false;
boolean removed2 = false;
while (clusterIterator.hasNext() && !(removed1 && removed2)) {
FastIDSet current = clusterIterator.next();
// Yes, use == here
if (!removed1 && cluster1 == current) {
clusterIterator.remove();
removed1 = true;
} else if (!removed2 && cluster2 == current) {
clusterIterator.remove();
removed2 = true;
}
}
// The only catch is if a cluster showed up twice in the list of best cluster pairs;
// have to remove the others. Pull out anything referencing these clusters from queue
for (Iterator<ClusterClusterPair> queueIterator = queue.iterator(); queueIterator.hasNext();) {
ClusterClusterPair pair = queueIterator.next();
FastIDSet pair1 = pair.getCluster1();
FastIDSet pair2 = pair.getCluster2();
if (pair1 == cluster1 || pair1 == cluster2 || pair2 == cluster1 || pair2 == cluster2) {
queueIterator.remove();
}
}
// Make new merged cluster
FastIDSet merged = new FastIDSet(cluster1.size() + cluster2.size());
merged.addAll(cluster1);
merged.addAll(cluster2);
// Compare against other clusters; update queue if needed
// That new pair we're just adding might be pretty close to something else, so
// catch that case here and put it back into our queue
for (FastIDSet cluster : clusters) {
double similarity = clusterSimilarity.getSimilarity(merged, cluster);
if (queue.size() > 0 && similarity > queue.get(queue.size() - 1).getSimilarity()) {
ListIterator<ClusterClusterPair> queueIterator = queue.listIterator();
while (queueIterator.hasNext()) {
if (similarity > queueIterator.next().getSimilarity()) {
queueIterator.previous();
break;
}
}
queueIterator.add(new ClusterClusterPair(merged, cluster, similarity));
}
}
// Finally add new cluster to list
clusters.add(merged);
}
return done;
}
|
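Note on the mergeClosestClusters pair above: the only difference is the added queue.size() > 0 guard in front of queue.get(queue.size() - 1). The loop just before it removes every queued pair that references either of the two clusters being merged, so the queue can legitimately be empty at that point; without the guard, peeking at the last (worst) queued similarity throws IndexOutOfBoundsException.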
public static long getTotalBytes(Iterable<SSTableReader> sstables)
{
long sum = 0;
for (SSTableReader sstable : sstables)
{
sum += sstable.length();
}
return sum;
}
| public static long getTotalBytes(Iterable<SSTableReader> sstables)
{
long sum = 0;
for (SSTableReader sstable : sstables)
{
sum += sstable.onDiskLength();
}
return sum;
}
|
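Note on the getTotalBytes pair above: the fix sums sstable.onDiskLength() instead of sstable.length(). For compressed SSTables the two differ, length() being the uncompressed data length while onDiskLength() is the space the file actually occupies on disk, which is what a total-bytes figure should report; the createSSTableAndLengthPairs entry two pairs down makes the same substitution.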
public CompressedSegmentedFile(String path, CompressionMetadata metadata)
{
super(path, metadata.dataLength);
this.metadata = metadata;
}
| public CompressedSegmentedFile(String path, CompressionMetadata metadata)
{
super(path, metadata.dataLength, metadata.compressedFileLength);
this.metadata = metadata;
}
|
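Note on the CompressedSegmentedFile pair above: the constructor now forwards both the uncompressed data length (metadata.dataLength) and the compressed on-disk length (metadata.compressedFileLength) to the superclass, which presumably gained a matching extra constructor parameter as part of the same change.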
private static List<Pair<SSTableReader, Long>> createSSTableAndLengthPairs(Collection<SSTableReader> collection)
{
List<Pair<SSTableReader, Long>> tableLengthPairs = new ArrayList<Pair<SSTableReader, Long>>();
for(SSTableReader table: collection)
tableLengthPairs.add(new Pair<SSTableReader, Long>(table, table.length()));
return tableLengthPairs;
}
| private static List<Pair<SSTableReader, Long>> createSSTableAndLengthPairs(Collection<SSTableReader> collection)
{
List<Pair<SSTableReader, Long>> tableLengthPairs = new ArrayList<Pair<SSTableReader, Long>>();
for(SSTableReader table: collection)
tableLengthPairs.add(new Pair<SSTableReader, Long>(table, table.onDiskLength()));
return tableLengthPairs;
}
|
public final void maybeRefreshBlocking() throws IOException, InterruptedException {
ensureOpen();
// Ensure only 1 thread does reopen at once
refreshLock.lock();
try {
doMaybeRefresh();
} finally {
refreshLock.lock();
}
}
| public final void maybeRefreshBlocking() throws IOException, InterruptedException {
ensureOpen();
// Ensure only 1 thread does reopen at once
refreshLock.lock();
try {
doMaybeRefresh();
} finally {
refreshLock.unlock();
}
}
|
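Note on the maybeRefreshBlocking pair above: the buggy version calls refreshLock.lock() again in the finally block instead of unlock(), so the lock is never released and any other thread that later calls the method blocks forever. The fix restores the standard lock()/try/finally-unlock() idiom, sketched below with names invented for the example.

import java.util.concurrent.locks.ReentrantLock;

// Minimal sketch of the acquire/try/finally-release idiom the fix restores.
public class GuardedRefreshSketch {
    private final ReentrantLock refreshLock = new ReentrantLock();
    private int refreshCount = 0;

    public void maybeRefreshBlocking() {
        refreshLock.lock();       // acquire before the guarded work
        try {
            refreshCount++;       // stand-in for doMaybeRefresh()
        } finally {
            refreshLock.unlock(); // always release, even if the work throws
        }
    }

    public static void main(String[] args) {
        GuardedRefreshSketch s = new GuardedRefreshSketch();
        s.maybeRefreshBlocking();
        s.maybeRefreshBlocking(); // safe to call repeatedly: the lock is released each time
        System.out.println("refreshed " + s.refreshCount + " times");
    }
}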
public Sorter newSorter(Entry[] arr) {
return new ArrayTimSorter<Entry>(arr, ArrayUtil.<Entry>naturalComparator(), random().nextInt(arr.length));
}
| public Sorter newSorter(Entry[] arr) {
return new ArrayTimSorter<Entry>(arr, ArrayUtil.<Entry>naturalComparator(), _TestUtil.nextInt(random(), 0, arr.length));
}
|
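Note on the newSorter pair above: java.util.Random.nextInt(n) requires a positive bound and never returns n itself, so random().nextInt(arr.length) throws IllegalArgumentException for an empty array and never yields the largest value. The replacement draws from 0 through arr.length via _TestUtil.nextInt, which the fixed code treats as an inclusive range that also copes with arr.length == 0.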
protected synchronized int addCategoryDocument(CategoryPath categoryPath,
int length, int parent)
throws CorruptIndexException, IOException {
// Before Lucene 2.9, position increments >=0 were supported, so we
// added 1 to parent to allow the parent -1 (the parent of the root).
// Unfortunately, starting with Lucene 2.9, after LUCENE-1542, this is
// no longer enough, since 0 is not encoded consistently either (see
// comment in SinglePositionTokenStream). But because we must be
// backward-compatible with existing indexes, we can't just fix what
// we write here (e.g., to write parent+2), and need to do a workaround
// in the reader (which knows that anyway only category 0 has a parent
// -1).
parentStream.set(parent+1);
Document d = new Document();
d.add(parentStreamField);
fullPathField.setValue(categoryPath.toString(delimiter, length));
d.add(fullPathField);
// Note that we do not pass an Analyzer here because the fields that are
// added to the Document are untokenized or contain their own TokenStream.
// Therefore the IndexWriter's Analyzer has no effect.
indexWriter.addDocument(d);
int id = nextID++;
addToCache(categoryPath, length, id);
// also add to the parent array
getParentArray().add(id, parent);
return id;
}
| protected synchronized int addCategoryDocument(CategoryPath categoryPath,
int length, int parent)
throws CorruptIndexException, IOException {
// Before Lucene 2.9, position increments >=0 were supported, so we
// added 1 to parent to allow the parent -1 (the parent of the root).
// Unfortunately, starting with Lucene 2.9, after LUCENE-1542, this is
// no longer enough, since 0 is not encoded consistently either (see
// comment in SinglePositionTokenStream). But because we must be
// backward-compatible with existing indexes, we can't just fix what
// we write here (e.g., to write parent+2), and need to do a workaround
// in the reader (which knows that anyway only category 0 has a parent
// -1).
parentStream.set(parent+1);
Document d = new Document();
d.add(parentStreamField);
fullPathField.setStringValue(categoryPath.toString(delimiter, length));
d.add(fullPathField);
// Note that we do not pass an Analyzer here because the fields that are
// added to the Document are untokenized or contain their own TokenStream.
// Therefore the IndexWriter's Analyzer has no effect.
indexWriter.addDocument(d);
int id = nextID++;
addToCache(categoryPath, length, id);
// also add to the parent array
getParentArray().add(id, parent);
return id;
}
|
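Note on the addCategoryDocument pair above: the only change is fullPathField.setValue(...) becoming fullPathField.setStringValue(...). The same one-line rename is the entire difference in every following pair down through the second run() method (the one that logs "TEST: IndexerThread: cycle"): the diffs reflect Lucene's Field replacing its overloaded setValue(...) mutators with type-specific setters such as setStringValue(String). A minimal usage sketch follows, assuming a Lucene 4.x-or-later classpath; the class name and field are invented for the example.

import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;

// Reuse one Field instance across documents, updating it with the typed
// setter that replaced the older setValue(String) overload.
public class ReuseFieldSketch {
    public static void main(String[] args) {
        Field id = new StringField("id", "", Field.Store.NO);
        for (int i = 0; i < 3; i++) {
            id.setStringValue(Integer.toString(i)); // was: id.setValue("" + i)
            System.out.println(id.name() + " = " + id.stringValue());
        }
    }
}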
public void testPerFieldCodec() throws Exception {
final int NUM_DOCS = atLeast(173);
if (VERBOSE) {
System.out.println("TEST: NUM_DOCS=" + NUM_DOCS);
}
MockDirectoryWrapper dir = newDirectory();
dir.setCheckIndexOnClose(false); // we use a custom codec provider
IndexWriter w = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
setCodec(new CustomPerFieldCodec()).
setMergePolicy(newLogMergePolicy(3))
);
Document doc = new Document();
// uses default codec:
doc.add(newField("field1", "this field uses the standard codec as the test", TextField.TYPE_UNSTORED));
// uses pulsing codec:
Field field2 = newField("field2", "this field uses the pulsing codec as the test", TextField.TYPE_UNSTORED);
doc.add(field2);
Field idField = newField("id", "", StringField.TYPE_UNSTORED);
doc.add(idField);
for(int i=0;i<NUM_DOCS;i++) {
idField.setValue(""+i);
w.addDocument(doc);
if ((i+1)%10 == 0) {
w.commit();
}
}
| public void testPerFieldCodec() throws Exception {
final int NUM_DOCS = atLeast(173);
if (VERBOSE) {
System.out.println("TEST: NUM_DOCS=" + NUM_DOCS);
}
MockDirectoryWrapper dir = newDirectory();
dir.setCheckIndexOnClose(false); // we use a custom codec provider
IndexWriter w = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
setCodec(new CustomPerFieldCodec()).
setMergePolicy(newLogMergePolicy(3))
);
Document doc = new Document();
// uses default codec:
doc.add(newField("field1", "this field uses the standard codec as the test", TextField.TYPE_UNSTORED));
// uses pulsing codec:
Field field2 = newField("field2", "this field uses the pulsing codec as the test", TextField.TYPE_UNSTORED);
doc.add(field2);
Field idField = newField("id", "", StringField.TYPE_UNSTORED);
doc.add(idField);
for(int i=0;i<NUM_DOCS;i++) {
idField.setStringValue(""+i);
w.addDocument(doc);
if ((i+1)%10 == 0) {
w.commit();
}
}
|
public void setUp() throws Exception {
super.setUp();
dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)));
Document doc = new Document();
Field field = newField("field", "", StringField.TYPE_UNSTORED);
doc.add(field);
NumberFormat df = new DecimalFormat("000", new DecimalFormatSymbols(Locale.ENGLISH));
for (int i = 0; i < 1000; i++) {
field.setValue(df.format(i));
writer.addDocument(doc);
}
reader = writer.getReader();
searcher = newSearcher(reader);
writer.close();
if (VERBOSE) {
System.out.println("TEST: setUp searcher=" + searcher);
}
}
| public void setUp() throws Exception {
super.setUp();
dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)));
Document doc = new Document();
Field field = newField("field", "", StringField.TYPE_UNSTORED);
doc.add(field);
NumberFormat df = new DecimalFormat("000", new DecimalFormatSymbols(Locale.ENGLISH));
for (int i = 0; i < 1000; i++) {
field.setStringValue(df.format(i));
writer.addDocument(doc);
}
reader = writer.getReader();
searcher = newSearcher(reader);
writer.close();
if (VERBOSE) {
System.out.println("TEST: setUp searcher=" + searcher);
}
}
|
public void setUp() throws Exception {
super.setUp();
dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)));
Document doc = new Document();
FieldType customType = new FieldType(TextField.TYPE_STORED);
customType.setOmitNorms(true);
Field field = newField("field", "", customType);
doc.add(field);
NumberFormat df = new DecimalFormat("000", new DecimalFormatSymbols(Locale.ENGLISH));
for (int i = 0; i < 1000; i++) {
field.setValue(df.format(i));
writer.addDocument(doc);
}
reader = writer.getReader();
writer.close();
searcher = newSearcher(reader);
}
| public void setUp() throws Exception {
super.setUp();
dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)));
Document doc = new Document();
FieldType customType = new FieldType(TextField.TYPE_STORED);
customType.setOmitNorms(true);
Field field = newField("field", "", customType);
doc.add(field);
NumberFormat df = new DecimalFormat("000", new DecimalFormatSymbols(Locale.ENGLISH));
for (int i = 0; i < 1000; i++) {
field.setStringValue(df.format(i));
writer.addDocument(doc);
}
reader = writer.getReader();
writer.close();
searcher = newSearcher(reader);
}
|
public void setUp() throws Exception {
super.setUp();
dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.KEYWORD, false))
.setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)));
Document doc = new Document();
Field field = newField("field", "", StringField.TYPE_UNSTORED);
doc.add(field);
// we generate awful prefixes: good for testing.
// but for preflex codec, the test can be very slow, so use less iterations.
final String codec = Codec.getDefault().getName();
int num = codec.equals("Lucene3x") ? 200 * RANDOM_MULTIPLIER : atLeast(1000);
for (int i = 0; i < num; i++) {
field.setValue(_TestUtil.randomUnicodeString(random, 10));
writer.addDocument(doc);
}
reader = writer.getReader();
searcher = newSearcher(reader);
writer.close();
}
| public void setUp() throws Exception {
super.setUp();
dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.KEYWORD, false))
.setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)));
Document doc = new Document();
Field field = newField("field", "", StringField.TYPE_UNSTORED);
doc.add(field);
// we generate awful prefixes: good for testing.
// but for preflex codec, the test can be very slow, so use less iterations.
final String codec = Codec.getDefault().getName();
int num = codec.equals("Lucene3x") ? 200 * RANDOM_MULTIPLIER : atLeast(1000);
for (int i = 0; i < num; i++) {
field.setStringValue(_TestUtil.randomUnicodeString(random, 10));
writer.addDocument(doc);
}
reader = writer.getReader();
searcher = newSearcher(reader);
writer.close();
}
|
public void assertFromTestData(int codePointTable[]) throws Exception {
if (VERBOSE) {
System.out.println("TEST: codePointTable=" + codePointTable);
}
InputStream stream = getClass().getResourceAsStream("fuzzyTestData.txt");
BufferedReader reader = new BufferedReader(new InputStreamReader(stream, "UTF-8"));
int bits = Integer.parseInt(reader.readLine());
int terms = (int) Math.pow(2, bits);
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.KEYWORD, false)).setMergePolicy(newLogMergePolicy()));
Document doc = new Document();
Field field = newField("field", "", TextField.TYPE_UNSTORED);
doc.add(field);
for (int i = 0; i < terms; i++) {
field.setValue(mapInt(codePointTable, i));
writer.addDocument(doc);
}
IndexReader r = writer.getReader();
IndexSearcher searcher = newSearcher(r);
if (VERBOSE) {
System.out.println("TEST: searcher=" + searcher);
}
// even though this uses a boost-only rewrite, this test relies upon queryNorm being the default implementation,
// otherwise scores are different!
searcher.setSimilarity(new DefaultSimilarity());
writer.close();
String line;
while ((line = reader.readLine()) != null) {
String params[] = line.split(",");
String query = mapInt(codePointTable, Integer.parseInt(params[0]));
int prefix = Integer.parseInt(params[1]);
int pqSize = Integer.parseInt(params[2]);
float minScore = Float.parseFloat(params[3]);
FuzzyQuery q = new FuzzyQuery(new Term("field", query), minScore, prefix);
q.setRewriteMethod(new MultiTermQuery.TopTermsBoostOnlyBooleanQueryRewrite(pqSize));
int expectedResults = Integer.parseInt(reader.readLine());
TopDocs docs = searcher.search(q, expectedResults);
assertEquals(expectedResults, docs.totalHits);
for (int i = 0; i < expectedResults; i++) {
String scoreDoc[] = reader.readLine().split(",");
assertEquals(Integer.parseInt(scoreDoc[0]), docs.scoreDocs[i].doc);
assertEquals(Float.parseFloat(scoreDoc[1]), docs.scoreDocs[i].score, epsilon);
}
}
r.close();
dir.close();
}
| public void assertFromTestData(int codePointTable[]) throws Exception {
if (VERBOSE) {
System.out.println("TEST: codePointTable=" + codePointTable);
}
InputStream stream = getClass().getResourceAsStream("fuzzyTestData.txt");
BufferedReader reader = new BufferedReader(new InputStreamReader(stream, "UTF-8"));
int bits = Integer.parseInt(reader.readLine());
int terms = (int) Math.pow(2, bits);
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.KEYWORD, false)).setMergePolicy(newLogMergePolicy()));
Document doc = new Document();
Field field = newField("field", "", TextField.TYPE_UNSTORED);
doc.add(field);
for (int i = 0; i < terms; i++) {
field.setStringValue(mapInt(codePointTable, i));
writer.addDocument(doc);
}
IndexReader r = writer.getReader();
IndexSearcher searcher = newSearcher(r);
if (VERBOSE) {
System.out.println("TEST: searcher=" + searcher);
}
// even though this uses a boost-only rewrite, this test relies upon queryNorm being the default implementation,
// otherwise scores are different!
searcher.setSimilarity(new DefaultSimilarity());
writer.close();
String line;
while ((line = reader.readLine()) != null) {
String params[] = line.split(",");
String query = mapInt(codePointTable, Integer.parseInt(params[0]));
int prefix = Integer.parseInt(params[1]);
int pqSize = Integer.parseInt(params[2]);
float minScore = Float.parseFloat(params[3]);
FuzzyQuery q = new FuzzyQuery(new Term("field", query), minScore, prefix);
q.setRewriteMethod(new MultiTermQuery.TopTermsBoostOnlyBooleanQueryRewrite(pqSize));
int expectedResults = Integer.parseInt(reader.readLine());
TopDocs docs = searcher.search(q, expectedResults);
assertEquals(expectedResults, docs.totalHits);
for (int i = 0; i < expectedResults; i++) {
String scoreDoc[] = reader.readLine().split(",");
assertEquals(Integer.parseInt(scoreDoc[0]), docs.scoreDocs[i].doc);
assertEquals(Float.parseFloat(scoreDoc[1]), docs.scoreDocs[i].score, epsilon);
}
}
r.close();
dir.close();
}
|
public void testRandomPhrases() throws Exception {
Directory dir = newDirectory();
Analyzer analyzer = new MockAnalyzer(random);
RandomIndexWriter w = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer).setMergePolicy(newLogMergePolicy()));
List<List<String>> docs = new ArrayList<List<String>>();
Document d = new Document();
Field f = newField("f", "", TextField.TYPE_UNSTORED);
d.add(f);
Random r = random;
int NUM_DOCS = atLeast(10);
for (int i = 0; i < NUM_DOCS; i++) {
// must be > 4096 so it spans multiple chunks
int termCount = _TestUtil.nextInt(random, 4097, 8200);
List<String> doc = new ArrayList<String>();
StringBuilder sb = new StringBuilder();
while(doc.size() < termCount) {
if (r.nextInt(5) == 1 || docs.size() == 0) {
// make new non-empty-string term
String term;
while(true) {
term = _TestUtil.randomUnicodeString(r);
if (term.length() > 0) {
break;
}
}
TokenStream ts = analyzer.tokenStream("ignore", new StringReader(term));
CharTermAttribute termAttr = ts.addAttribute(CharTermAttribute.class);
ts.reset();
while(ts.incrementToken()) {
String text = termAttr.toString();
doc.add(text);
sb.append(text).append(' ');
}
ts.end();
ts.close();
} else {
// pick existing sub-phrase
List<String> lastDoc = docs.get(r.nextInt(docs.size()));
int len = _TestUtil.nextInt(r, 1, 10);
int start = r.nextInt(lastDoc.size()-len);
for(int k=start;k<start+len;k++) {
String t = lastDoc.get(k);
doc.add(t);
sb.append(t).append(' ');
}
}
}
docs.add(doc);
f.setValue(sb.toString());
w.addDocument(d);
}
IndexReader reader = w.getReader();
IndexSearcher s = newSearcher(reader);
w.close();
// now search
int num = atLeast(10);
for(int i=0;i<num;i++) {
int docID = r.nextInt(docs.size());
List<String> doc = docs.get(docID);
final int numTerm = _TestUtil.nextInt(r, 2, 20);
final int start = r.nextInt(doc.size()-numTerm);
PhraseQuery pq = new PhraseQuery();
StringBuilder sb = new StringBuilder();
for(int t=start;t<start+numTerm;t++) {
pq.add(new Term("f", doc.get(t)));
sb.append(doc.get(t)).append(' ');
}
TopDocs hits = s.search(pq, NUM_DOCS);
boolean found = false;
for(int j=0;j<hits.scoreDocs.length;j++) {
if (hits.scoreDocs[j].doc == docID) {
found = true;
break;
}
}
assertTrue("phrase '" + sb + "' not found; start=" + start, found);
}
reader.close();
dir.close();
}
| public void testRandomPhrases() throws Exception {
Directory dir = newDirectory();
Analyzer analyzer = new MockAnalyzer(random);
RandomIndexWriter w = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer).setMergePolicy(newLogMergePolicy()));
List<List<String>> docs = new ArrayList<List<String>>();
Document d = new Document();
Field f = newField("f", "", TextField.TYPE_UNSTORED);
d.add(f);
Random r = random;
int NUM_DOCS = atLeast(10);
for (int i = 0; i < NUM_DOCS; i++) {
// must be > 4096 so it spans multiple chunks
int termCount = _TestUtil.nextInt(random, 4097, 8200);
List<String> doc = new ArrayList<String>();
StringBuilder sb = new StringBuilder();
while(doc.size() < termCount) {
if (r.nextInt(5) == 1 || docs.size() == 0) {
// make new non-empty-string term
String term;
while(true) {
term = _TestUtil.randomUnicodeString(r);
if (term.length() > 0) {
break;
}
}
TokenStream ts = analyzer.tokenStream("ignore", new StringReader(term));
CharTermAttribute termAttr = ts.addAttribute(CharTermAttribute.class);
ts.reset();
while(ts.incrementToken()) {
String text = termAttr.toString();
doc.add(text);
sb.append(text).append(' ');
}
ts.end();
ts.close();
} else {
// pick existing sub-phrase
List<String> lastDoc = docs.get(r.nextInt(docs.size()));
int len = _TestUtil.nextInt(r, 1, 10);
int start = r.nextInt(lastDoc.size()-len);
for(int k=start;k<start+len;k++) {
String t = lastDoc.get(k);
doc.add(t);
sb.append(t).append(' ');
}
}
}
docs.add(doc);
f.setStringValue(sb.toString());
w.addDocument(d);
}
IndexReader reader = w.getReader();
IndexSearcher s = newSearcher(reader);
w.close();
// now search
int num = atLeast(10);
for(int i=0;i<num;i++) {
int docID = r.nextInt(docs.size());
List<String> doc = docs.get(docID);
final int numTerm = _TestUtil.nextInt(r, 2, 20);
final int start = r.nextInt(doc.size()-numTerm);
PhraseQuery pq = new PhraseQuery();
StringBuilder sb = new StringBuilder();
for(int t=start;t<start+numTerm;t++) {
pq.add(new Term("f", doc.get(t)));
sb.append(doc.get(t)).append(' ');
}
TopDocs hits = s.search(pq, NUM_DOCS);
boolean found = false;
for(int j=0;j<hits.scoreDocs.length;j++) {
if (hits.scoreDocs[j].doc == docID) {
found = true;
break;
}
}
assertTrue("phrase '" + sb + "' not found; start=" + start, found);
}
reader.close();
dir.close();
}
|
public void setUp() throws Exception {
super.setUp();
dir = newDirectory();
fieldName = random.nextBoolean() ? "field" : ""; // sometimes use an empty string as field name
RandomIndexWriter writer = new RandomIndexWriter(random, dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.KEYWORD, false))
.setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)));
Document doc = new Document();
Field field = newField(fieldName, "", StringField.TYPE_UNSTORED);
doc.add(field);
List<String> terms = new ArrayList<String>();
int num = atLeast(200);
for (int i = 0; i < num; i++) {
String s = _TestUtil.randomUnicodeString(random);
field.setValue(s);
terms.add(s);
writer.addDocument(doc);
}
if (VERBOSE) {
// utf16 order
Collections.sort(terms);
System.out.println("UTF16 order:");
for(String s : terms) {
System.out.println(" " + UnicodeUtil.toHexString(s));
}
}
reader = writer.getReader();
searcher1 = newSearcher(reader);
searcher2 = newSearcher(reader);
writer.close();
}
| public void setUp() throws Exception {
super.setUp();
dir = newDirectory();
fieldName = random.nextBoolean() ? "field" : ""; // sometimes use an empty string as field name
RandomIndexWriter writer = new RandomIndexWriter(random, dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.KEYWORD, false))
.setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)));
Document doc = new Document();
Field field = newField(fieldName, "", StringField.TYPE_UNSTORED);
doc.add(field);
List<String> terms = new ArrayList<String>();
int num = atLeast(200);
for (int i = 0; i < num; i++) {
String s = _TestUtil.randomUnicodeString(random);
field.setStringValue(s);
terms.add(s);
writer.addDocument(doc);
}
if (VERBOSE) {
// utf16 order
Collections.sort(terms);
System.out.println("UTF16 order:");
for(String s : terms) {
System.out.println(" " + UnicodeUtil.toHexString(s));
}
}
reader = writer.getReader();
searcher1 = newSearcher(reader);
searcher2 = newSearcher(reader);
writer.close();
}
|
public void setUp() throws Exception {
super.setUp();
// we generate awful regexps: good for testing.
// but for preflex codec, the test can be very slow, so use less iterations.
numIterations = Codec.getDefault().getName().equals("Lucene3x") ? 10 * RANDOM_MULTIPLIER : atLeast(50);
dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, dir,
newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer(random, MockTokenizer.KEYWORD, false))
.setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)));
Document doc = new Document();
Field field = newField("field", "", StringField.TYPE_STORED);
doc.add(field);
terms = new TreeSet<BytesRef>();
int num = atLeast(200);
for (int i = 0; i < num; i++) {
String s = _TestUtil.randomUnicodeString(random);
field.setValue(s);
terms.add(new BytesRef(s));
writer.addDocument(doc);
}
termsAutomaton = DaciukMihovAutomatonBuilder.build(terms);
reader = writer.getReader();
searcher = newSearcher(reader);
writer.close();
}
| public void setUp() throws Exception {
super.setUp();
// we generate awful regexps: good for testing.
// but for preflex codec, the test can be very slow, so use less iterations.
numIterations = Codec.getDefault().getName().equals("Lucene3x") ? 10 * RANDOM_MULTIPLIER : atLeast(50);
dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, dir,
newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer(random, MockTokenizer.KEYWORD, false))
.setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)));
Document doc = new Document();
Field field = newField("field", "", StringField.TYPE_STORED);
doc.add(field);
terms = new TreeSet<BytesRef>();
int num = atLeast(200);
for (int i = 0; i < num; i++) {
String s = _TestUtil.randomUnicodeString(random);
field.setStringValue(s);
terms.add(new BytesRef(s));
writer.addDocument(doc);
}
termsAutomaton = DaciukMihovAutomatonBuilder.build(terms);
reader = writer.getReader();
searcher = newSearcher(reader);
writer.close();
}
|
public void setUp() throws Exception {
super.setUp();
dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.KEYWORD, false))
.setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)));
Document doc = new Document();
Field field = newField("field", "", StringField.TYPE_UNSTORED);
doc.add(field);
// we generate awful prefixes: good for testing.
// but for preflex codec, the test can be very slow, so use less iterations.
int num = atLeast(10);
for (int i = 0; i < num; i++) {
field.setValue(_TestUtil.randomUnicodeString(random, 10));
writer.addDocument(doc);
}
reader = writer.getReader();
searcher = newSearcher(reader);
writer.close();
}
| public void setUp() throws Exception {
super.setUp();
dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.KEYWORD, false))
.setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)));
Document doc = new Document();
Field field = newField("field", "", StringField.TYPE_UNSTORED);
doc.add(field);
// we generate awful prefixes: good for testing.
// but for preflex codec, the test can be very slow, so use less iterations.
int num = atLeast(10);
for (int i = 0; i < num; i++) {
field.setStringValue(_TestUtil.randomUnicodeString(random, 10));
writer.addDocument(doc);
}
reader = writer.getReader();
searcher = newSearcher(reader);
writer.close();
}
|
public void testCustomEncoder() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
config.setSimilarity(new CustomNormEncodingSimilarity());
RandomIndexWriter writer = new RandomIndexWriter(random, dir, config);
Document doc = new Document();
Field foo = newField("foo", "", TextField.TYPE_UNSTORED);
Field bar = newField("bar", "", TextField.TYPE_UNSTORED);
doc.add(foo);
doc.add(bar);
for (int i = 0; i < 100; i++) {
bar.setValue("singleton");
writer.addDocument(doc);
}
| public void testCustomEncoder() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
config.setSimilarity(new CustomNormEncodingSimilarity());
RandomIndexWriter writer = new RandomIndexWriter(random, dir, config);
Document doc = new Document();
Field foo = newField("foo", "", TextField.TYPE_UNSTORED);
Field bar = newField("bar", "", TextField.TYPE_UNSTORED);
doc.add(foo);
doc.add(bar);
for (int i = 0; i < 100; i++) {
bar.setStringValue("singleton");
writer.addDocument(doc);
}
|
public void setUp() throws Exception {
super.setUp();
dir = newDirectory();
IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer(random, MockTokenizer.SIMPLE, true)).setMergePolicy(newLogMergePolicy());
config.setSimilarity(new TestSimilarity());
RandomIndexWriter writer = new RandomIndexWriter(random, dir, config);
Document doc = new Document();
Field foo = newField("foo", "", TextField.TYPE_UNSTORED);
doc.add(foo);
for (int i = 0; i < 100; i++) {
foo.setValue(addValue());
writer.addDocument(doc);
}
reader = writer.getReader();
writer.close();
}
| public void setUp() throws Exception {
super.setUp();
dir = newDirectory();
IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer(random, MockTokenizer.SIMPLE, true)).setMergePolicy(newLogMergePolicy());
config.setSimilarity(new TestSimilarity());
RandomIndexWriter writer = new RandomIndexWriter(random, dir, config);
Document doc = new Document();
Field foo = newField("foo", "", TextField.TYPE_UNSTORED);
doc.add(foo);
for (int i = 0; i < 100; i++) {
foo.setStringValue(addValue());
writer.addDocument(doc);
}
reader = writer.getReader();
writer.close();
}
|
public void testRollingUpdates() throws Exception {
final MockDirectoryWrapper dir = newDirectory();
dir.setCheckIndexOnClose(false); // we use a custom codec provider
final LineFileDocs docs = new LineFileDocs(random);
//provider.register(new MemoryCodec());
if ( (!"Lucene3x".equals(Codec.getDefault().getName())) && random.nextBoolean()) {
Codec.setDefault(_TestUtil.alwaysPostingsFormat(new MemoryPostingsFormat(random.nextBoolean())));
}
final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
final int SIZE = atLeast(20);
int id = 0;
IndexReader r = null;
final int numUpdates = (int) (SIZE * (2+(TEST_NIGHTLY ? 200*random.nextDouble() : 5*random.nextDouble())));
if (VERBOSE) {
System.out.println("TEST: numUpdates=" + numUpdates);
}
for(int docIter=0;docIter<numUpdates;docIter++) {
final Document doc = docs.nextDoc();
final String myID = ""+id;
if (id == SIZE-1) {
id = 0;
} else {
id++;
}
((Field) doc.getField("docid")).setValue(myID);
w.updateDocument(new Term("docid", myID), doc);
if (docIter >= SIZE && random.nextInt(50) == 17) {
if (r != null) {
r.close();
}
final boolean applyDeletions = random.nextBoolean();
r = w.getReader(applyDeletions);
assertTrue("applyDeletions=" + applyDeletions + " r.numDocs()=" + r.numDocs() + " vs SIZE=" + SIZE, !applyDeletions || r.numDocs() == SIZE);
}
}
if (r != null) {
r.close();
}
w.commit();
assertEquals(SIZE, w.numDocs());
w.close();
docs.close();
_TestUtil.checkIndex(dir);
dir.close();
}
| public void testRollingUpdates() throws Exception {
final MockDirectoryWrapper dir = newDirectory();
dir.setCheckIndexOnClose(false); // we use a custom codec provider
final LineFileDocs docs = new LineFileDocs(random);
//provider.register(new MemoryCodec());
if ( (!"Lucene3x".equals(Codec.getDefault().getName())) && random.nextBoolean()) {
Codec.setDefault(_TestUtil.alwaysPostingsFormat(new MemoryPostingsFormat(random.nextBoolean())));
}
final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
final int SIZE = atLeast(20);
int id = 0;
IndexReader r = null;
final int numUpdates = (int) (SIZE * (2+(TEST_NIGHTLY ? 200*random.nextDouble() : 5*random.nextDouble())));
if (VERBOSE) {
System.out.println("TEST: numUpdates=" + numUpdates);
}
for(int docIter=0;docIter<numUpdates;docIter++) {
final Document doc = docs.nextDoc();
final String myID = ""+id;
if (id == SIZE-1) {
id = 0;
} else {
id++;
}
((Field) doc.getField("docid")).setStringValue(myID);
w.updateDocument(new Term("docid", myID), doc);
if (docIter >= SIZE && random.nextInt(50) == 17) {
if (r != null) {
r.close();
}
final boolean applyDeletions = random.nextBoolean();
r = w.getReader(applyDeletions);
assertTrue("applyDeletions=" + applyDeletions + " r.numDocs()=" + r.numDocs() + " vs SIZE=" + SIZE, !applyDeletions || r.numDocs() == SIZE);
}
}
if (r != null) {
r.close();
}
w.commit();
assertEquals(SIZE, w.numDocs());
w.close();
docs.close();
_TestUtil.checkIndex(dir);
dir.close();
}
|
public void testRandomStoredFields() throws IOException {
Directory dir = newDirectory();
Random rand = random;
RandomIndexWriter w = new RandomIndexWriter(rand, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(_TestUtil.nextInt(rand, 5, 20)));
//w.w.setUseCompoundFile(false);
final int docCount = atLeast(200);
final int fieldCount = _TestUtil.nextInt(rand, 1, 5);
final List<Integer> fieldIDs = new ArrayList<Integer>();
FieldType customType = new FieldType(TextField.TYPE_STORED);
customType.setTokenized(false);
Field idField = newField("id", "", customType);
for(int i=0;i<fieldCount;i++) {
fieldIDs.add(i);
}
final Map<String,Document> docs = new HashMap<String,Document>();
if (VERBOSE) {
System.out.println("TEST: build index docCount=" + docCount);
}
FieldType customType2 = new FieldType();
customType2.setStored(true);
for(int i=0;i<docCount;i++) {
Document doc = new Document();
doc.add(idField);
final String id = ""+i;
idField.setValue(id);
docs.put(id, doc);
if (VERBOSE) {
System.out.println("TEST: add doc id=" + id);
}
for(int field: fieldIDs) {
final String s;
if (rand.nextInt(4) != 3) {
s = _TestUtil.randomUnicodeString(rand, 1000);
doc.add(newField("f"+field, s, customType2));
} else {
s = null;
}
}
w.addDocument(doc);
if (rand.nextInt(50) == 17) {
// mixup binding of field name -> Number every so often
Collections.shuffle(fieldIDs);
}
if (rand.nextInt(5) == 3 && i > 0) {
final String delID = ""+rand.nextInt(i);
if (VERBOSE) {
System.out.println("TEST: delete doc id=" + delID);
}
w.deleteDocuments(new Term("id", delID));
docs.remove(delID);
}
}
if (VERBOSE) {
System.out.println("TEST: " + docs.size() + " docs in index; now load fields");
}
if (docs.size() > 0) {
String[] idsList = docs.keySet().toArray(new String[docs.size()]);
for(int x=0;x<2;x++) {
IndexReader r = w.getReader();
IndexSearcher s = newSearcher(r);
if (VERBOSE) {
System.out.println("TEST: cycle x=" + x + " r=" + r);
}
int num = atLeast(1000);
for(int iter=0;iter<num;iter++) {
String testID = idsList[rand.nextInt(idsList.length)];
if (VERBOSE) {
System.out.println("TEST: test id=" + testID);
}
TopDocs hits = s.search(new TermQuery(new Term("id", testID)), 1);
assertEquals(1, hits.totalHits);
Document doc = r.document(hits.scoreDocs[0].doc);
Document docExp = docs.get(testID);
for(int i=0;i<fieldCount;i++) {
assertEquals("doc " + testID + ", field f" + fieldCount + " is wrong", docExp.get("f"+i), doc.get("f"+i));
}
}
r.close();
w.forceMerge(1);
}
}
w.close();
dir.close();
}
| public void testRandomStoredFields() throws IOException {
Directory dir = newDirectory();
Random rand = random;
RandomIndexWriter w = new RandomIndexWriter(rand, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(_TestUtil.nextInt(rand, 5, 20)));
//w.w.setUseCompoundFile(false);
final int docCount = atLeast(200);
final int fieldCount = _TestUtil.nextInt(rand, 1, 5);
final List<Integer> fieldIDs = new ArrayList<Integer>();
FieldType customType = new FieldType(TextField.TYPE_STORED);
customType.setTokenized(false);
Field idField = newField("id", "", customType);
for(int i=0;i<fieldCount;i++) {
fieldIDs.add(i);
}
final Map<String,Document> docs = new HashMap<String,Document>();
if (VERBOSE) {
System.out.println("TEST: build index docCount=" + docCount);
}
FieldType customType2 = new FieldType();
customType2.setStored(true);
for(int i=0;i<docCount;i++) {
Document doc = new Document();
doc.add(idField);
final String id = ""+i;
idField.setStringValue(id);
docs.put(id, doc);
if (VERBOSE) {
System.out.println("TEST: add doc id=" + id);
}
for(int field: fieldIDs) {
final String s;
if (rand.nextInt(4) != 3) {
s = _TestUtil.randomUnicodeString(rand, 1000);
doc.add(newField("f"+field, s, customType2));
} else {
s = null;
}
}
w.addDocument(doc);
if (rand.nextInt(50) == 17) {
// mixup binding of field name -> Number every so often
Collections.shuffle(fieldIDs);
}
if (rand.nextInt(5) == 3 && i > 0) {
final String delID = ""+rand.nextInt(i);
if (VERBOSE) {
System.out.println("TEST: delete doc id=" + delID);
}
w.deleteDocuments(new Term("id", delID));
docs.remove(delID);
}
}
if (VERBOSE) {
System.out.println("TEST: " + docs.size() + " docs in index; now load fields");
}
if (docs.size() > 0) {
String[] idsList = docs.keySet().toArray(new String[docs.size()]);
for(int x=0;x<2;x++) {
IndexReader r = w.getReader();
IndexSearcher s = newSearcher(r);
if (VERBOSE) {
System.out.println("TEST: cycle x=" + x + " r=" + r);
}
int num = atLeast(1000);
for(int iter=0;iter<num;iter++) {
String testID = idsList[rand.nextInt(idsList.length)];
if (VERBOSE) {
System.out.println("TEST: test id=" + testID);
}
TopDocs hits = s.search(new TermQuery(new Term("id", testID)), 1);
assertEquals(1, hits.totalHits);
Document doc = r.document(hits.scoreDocs[0].doc);
Document docExp = docs.get(testID);
for(int i=0;i<fieldCount;i++) {
assertEquals("doc " + testID + ", field f" + fieldCount + " is wrong", docExp.get("f"+i), doc.get("f"+i));
}
}
r.close();
w.forceMerge(1);
}
}
w.close();
dir.close();
}
|
public void run() {
try {
final Document doc = new Document();
DirectoryReader r = IndexReader.open(dir);
Field f = newField("f", "", StringField.TYPE_UNSTORED);
doc.add(f);
int count = 0;
do {
if (failed.get()) break;
for(int j=0;j<10;j++) {
final String s = finalI + "_" + String.valueOf(count++);
f.setValue(s);
w.addDocument(doc);
w.commit();
DirectoryReader r2 = DirectoryReader.openIfChanged(r);
assertNotNull(r2);
assertTrue(r2 != r);
r.close();
r = r2;
assertEquals("term=f:" + s + "; r=" + r, 1, r.docFreq(new Term("f", s)));
}
} while(System.currentTimeMillis() < endTime);
r.close();
} catch (Throwable t) {
failed.set(true);
throw new RuntimeException(t);
}
}
};
threads[i].start();
}
for(int i=0;i<NUM_THREADS;i++) {
threads[i].join();
}
assertFalse(failed.get());
w.close();
dir.close();
}
| public void run() {
try {
final Document doc = new Document();
DirectoryReader r = IndexReader.open(dir);
Field f = newField("f", "", StringField.TYPE_UNSTORED);
doc.add(f);
int count = 0;
do {
if (failed.get()) break;
for(int j=0;j<10;j++) {
final String s = finalI + "_" + String.valueOf(count++);
f.setStringValue(s);
w.addDocument(doc);
w.commit();
DirectoryReader r2 = DirectoryReader.openIfChanged(r);
assertNotNull(r2);
assertTrue(r2 != r);
r.close();
r = r2;
assertEquals("term=f:" + s + "; r=" + r, 1, r.docFreq(new Term("f", s)));
}
} while(System.currentTimeMillis() < endTime);
r.close();
} catch (Throwable t) {
failed.set(true);
throw new RuntimeException(t);
}
}
};
threads[i].start();
}
for(int i=0;i<NUM_THREADS;i++) {
threads[i].join();
}
assertFalse(failed.get());
w.close();
dir.close();
}
|
public void run() {
final Document doc = new Document();
doc.add(newField(r, "content1", "aaa bbb ccc ddd", TextField.TYPE_STORED));
doc.add(newField(r, "content6", "aaa bbb ccc ddd", DocCopyIterator.custom1));
doc.add(newField(r, "content2", "aaa bbb ccc ddd", DocCopyIterator.custom2));
doc.add(newField(r, "content3", "aaa bbb ccc ddd", DocCopyIterator.custom3));
doc.add(newField(r, "content4", "aaa bbb ccc ddd", TextField.TYPE_UNSTORED));
doc.add(newField(r, "content5", "aaa bbb ccc ddd", StringField.TYPE_UNSTORED));
doc.add(newField(r, "content7", "aaa bbb ccc ddd", DocCopyIterator.custom4));
final Field idField = newField(r, "id", "", DocCopyIterator.custom2);
doc.add(idField);
final long stopTime = System.currentTimeMillis() + 500;
do {
if (VERBOSE) {
System.out.println(Thread.currentThread().getName() + ": TEST: IndexerThread: cycle");
}
doFail.set(this);
final String id = ""+r.nextInt(50);
idField.setValue(id);
Term idTerm = new Term("id", id);
try {
if (r.nextBoolean()) {
writer.updateDocuments(idTerm, new DocCopyIterator(doc, _TestUtil.nextInt(r, 1, 20)));
} else {
writer.updateDocument(idTerm, doc);
}
} catch (RuntimeException re) {
if (VERBOSE) {
System.out.println(Thread.currentThread().getName() + ": EXC: ");
re.printStackTrace(System.out);
}
try {
_TestUtil.checkIndex(writer.getDirectory());
} catch (IOException ioe) {
System.out.println(Thread.currentThread().getName() + ": unexpected exception1");
ioe.printStackTrace(System.out);
failure = ioe;
break;
}
} catch (Throwable t) {
System.out.println(Thread.currentThread().getName() + ": unexpected exception2");
t.printStackTrace(System.out);
failure = t;
break;
}
doFail.set(null);
// After a possible exception (above) I should be able
// to add a new document without hitting an
// exception:
try {
writer.updateDocument(idTerm, doc);
} catch (Throwable t) {
System.out.println(Thread.currentThread().getName() + ": unexpected exception3");
t.printStackTrace(System.out);
failure = t;
break;
}
} while(System.currentTimeMillis() < stopTime);
}
| public void run() {
final Document doc = new Document();
doc.add(newField(r, "content1", "aaa bbb ccc ddd", TextField.TYPE_STORED));
doc.add(newField(r, "content6", "aaa bbb ccc ddd", DocCopyIterator.custom1));
doc.add(newField(r, "content2", "aaa bbb ccc ddd", DocCopyIterator.custom2));
doc.add(newField(r, "content3", "aaa bbb ccc ddd", DocCopyIterator.custom3));
doc.add(newField(r, "content4", "aaa bbb ccc ddd", TextField.TYPE_UNSTORED));
doc.add(newField(r, "content5", "aaa bbb ccc ddd", StringField.TYPE_UNSTORED));
doc.add(newField(r, "content7", "aaa bbb ccc ddd", DocCopyIterator.custom4));
final Field idField = newField(r, "id", "", DocCopyIterator.custom2);
doc.add(idField);
final long stopTime = System.currentTimeMillis() + 500;
do {
if (VERBOSE) {
System.out.println(Thread.currentThread().getName() + ": TEST: IndexerThread: cycle");
}
doFail.set(this);
final String id = ""+r.nextInt(50);
idField.setStringValue(id);
Term idTerm = new Term("id", id);
try {
if (r.nextBoolean()) {
writer.updateDocuments(idTerm, new DocCopyIterator(doc, _TestUtil.nextInt(r, 1, 20)));
} else {
writer.updateDocument(idTerm, doc);
}
} catch (RuntimeException re) {
if (VERBOSE) {
System.out.println(Thread.currentThread().getName() + ": EXC: ");
re.printStackTrace(System.out);
}
try {
_TestUtil.checkIndex(writer.getDirectory());
} catch (IOException ioe) {
System.out.println(Thread.currentThread().getName() + ": unexpected exception1");
ioe.printStackTrace(System.out);
failure = ioe;
break;
}
} catch (Throwable t) {
System.out.println(Thread.currentThread().getName() + ": unexpected exception2");
t.printStackTrace(System.out);
failure = t;
break;
}
doFail.set(null);
// After a possible exception (above) I should be able
// to add a new document without hitting an
// exception:
try {
writer.updateDocument(idTerm, doc);
} catch (Throwable t) {
System.out.println(Thread.currentThread().getName() + ": unexpected exception3");
t.printStackTrace(System.out);
failure = t;
break;
}
} while(System.currentTimeMillis() < stopTime);
}
|
private void updateStatistics()
throws StandardException {
ConglomerateDescriptor[] cds;
td = dd.getTableDescriptor(tableId);
if (updateStatisticsAll) {
cds = td.getConglomerateDescriptors();
} else {
cds = new ConglomerateDescriptor[1];
cds[0] = dd.getConglomerateDescriptor(
indexNameForStatistics, sd, false);
}
dd.getIndexStatsRefresher(false).runExplicitly(
lcc, td, cds, "ALTER TABLE");
}
| private void updateStatistics()
throws StandardException {
ConglomerateDescriptor[] cds;
td = dd.getTableDescriptor(tableId);
if (updateStatisticsAll) {
cds = null;
} else {
cds = new ConglomerateDescriptor[1];
cds[0] = dd.getConglomerateDescriptor(
indexNameForStatistics, sd, false);
}
dd.getIndexStatsRefresher(false).runExplicitly(
lcc, td, cds, "ALTER TABLE");
}
|
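Note on the updateStatistics pair above: when updateStatisticsAll is set, the fix passes cds = null to runExplicitly(...) instead of materializing the table's full array of conglomerate descriptors, presumably leaving it to the statistics refresher to pick up every index on the table in the update-all case.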
public void testThreadInterruptDeadlock() throws Exception {
IndexerThreadInterrupt t = new IndexerThreadInterrupt();
t.setDaemon(true);
t.start();
// Force class loader to load ThreadInterruptedException
// up front... else we can see a false failure if 2nd
// interrupt arrives while class loader is trying to
// init this class (in servicing a first interrupt):
assertTrue(new ThreadInterruptedException(new InterruptedException()).getCause() instanceof InterruptedException);
// issue 300 interrupts to child thread
final int numInterrupts = atLeast(3000);
int i = 0;
while(i < numInterrupts) {
// TODO: would be nice to also sometimes interrupt the
// CMS merge threads too ...
Thread.sleep(10);
if (t.allowInterrupt) {
i++;
t.interrupt();
}
if (!t.isAlive()) {
break;
}
}
t.finish = true;
t.join();
assertFalse(t.failed);
}
| public void testThreadInterruptDeadlock() throws Exception {
IndexerThreadInterrupt t = new IndexerThreadInterrupt();
t.setDaemon(true);
t.start();
// Force class loader to load ThreadInterruptedException
// up front... else we can see a false failure if 2nd
// interrupt arrives while class loader is trying to
// init this class (in servicing a first interrupt):
assertTrue(new ThreadInterruptedException(new InterruptedException()).getCause() instanceof InterruptedException);
// issue 300 interrupts to child thread
final int numInterrupts = atLeast(300);
int i = 0;
while(i < numInterrupts) {
// TODO: would be nice to also sometimes interrupt the
// CMS merge threads too ...
Thread.sleep(10);
if (t.allowInterrupt) {
i++;
t.interrupt();
}
if (!t.isAlive()) {
break;
}
}
t.finish = true;
t.join();
assertFalse(t.failed);
}
|
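Note on the testThreadInterruptDeadlock pair above: the in-code comment says "issue 300 interrupts", but the buggy version asks for atLeast(3000). Each loop iteration sleeps 10 ms, so 3000 interrupts put a floor of roughly 30 seconds on the test (3000 sleeps of 10 ms) versus about 3 seconds for 300; the fix brings the count back in line with the comment.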
public static void main(String[] args) throws IOException,
ClassNotFoundException, IllegalAccessException, InstantiationException, OptionException {
final DefaultOptionBuilder obuilder = new DefaultOptionBuilder();
final ArgumentBuilder abuilder = new ArgumentBuilder();
final GroupBuilder gbuilder = new GroupBuilder();
Option pathOpt = obuilder.withLongName("path").withRequired(true).withArgument(
abuilder.withName("path").withMinimum(1).withMaximum(1).create()).
withDescription("The local file system path").withShortName("p").create();
Option dirOpt = obuilder.withLongName("testDir").withRequired(true).withArgument(
abuilder.withName("testDir").withMinimum(1).withMaximum(1).create()).
withDescription("The directory where test documents resides in").withShortName("t").create();
Option encodingOpt = obuilder.withLongName("encoding").withArgument(
abuilder.withName("encoding").withMinimum(1).withMaximum(1).create()).
withDescription("The file encoding. Defaults to UTF-8").withShortName("e").create();
Option analyzerOpt = obuilder.withLongName("analyzer").withArgument(
abuilder.withName("analyzer").withMinimum(1).withMaximum(1).create()).
withDescription("The Analyzer to use").withShortName("a").create();
Option defaultCatOpt = obuilder.withLongName("defaultCat").withArgument(
abuilder.withName("defaultCat").withMinimum(1).withMaximum(1).create()).
withDescription("The default category").withShortName("d").create();
Option gramSizeOpt = obuilder.withLongName("gramSize").withRequired(true).withArgument(
abuilder.withName("gramSize").withMinimum(1).withMaximum(1).create()).
withDescription("Size of the n-gram").withShortName("ng").create();
Option typeOpt = obuilder.withLongName("classifierType").withRequired(true).withArgument(
abuilder.withName("classifierType").withMinimum(1).withMaximum(1).create()).
withDescription("Type of classifier: bayes|cbayes").withShortName("type").create();
Group group = gbuilder.withName("Options").withOption(analyzerOpt).withOption(defaultCatOpt).withOption(dirOpt).withOption(encodingOpt).withOption(gramSizeOpt).withOption(pathOpt)
.withOption(typeOpt).create();
CommandLine cmdLine;
Parser parser = new Parser();
parser.setGroup(group);
cmdLine = parser.parse(args);
SequenceFileModelReader reader = new SequenceFileModelReader();
JobConf conf = new JobConf(TestClassifier.class);
Map<String, Path> modelPaths = new HashMap<String, Path>();
String modelBasePath = (String) cmdLine.getValue(pathOpt);
modelPaths.put("sigma_j", new Path(modelBasePath + "/trainer-weights/Sigma_j/part-*"));
modelPaths.put("sigma_k", new Path(modelBasePath + "/trainer-weights/Sigma_k/part-*"));
modelPaths.put("sigma_kSigma_j", new Path(modelBasePath + "/trainer-weights/Sigma_kSigma_j/part-*"));
modelPaths.put("thetaNormalizer", new Path(modelBasePath + "/trainer-thetaNormalizer/part-*"));
modelPaths.put("weight", new Path(modelBasePath + "/trainer-tfIdf/trainer-tfIdf/part-*"));
FileSystem fs = FileSystem.get(conf);
log.info("Loading model from: {}", modelPaths);
Model model;
Classifier classifier;
String classifierType = (String) cmdLine.getValue(typeOpt);
if (classifierType.equalsIgnoreCase("bayes")) {
log.info("Testing Bayes Classifier");
model = new BayesModel();
classifier = new BayesClassifier();
} else if (classifierType.equalsIgnoreCase("cbayes")) {
log.info("Testing Complementary Bayes Classifier");
model = new CBayesModel();
classifier = new CBayesClassifier();
} else {
throw new IllegalArgumentException("Unrecognized classifier type: " + classifierType);
}
model = reader.loadModel(model, fs, modelPaths, conf);
log.info("Done loading model: # labels: {}", model.getLabels().size());
log.info("Done generating Model");
String defaultCat = "unknown";
if (cmdLine.hasOption(defaultCatOpt)) {
defaultCat = (String) cmdLine.getValue(defaultCatOpt);
}
String encoding = "UTF-8";
if (cmdLine.hasOption(encodingOpt)) {
encoding = (String) cmdLine.getValue(encodingOpt);
}
//Analyzer analyzer = null;
//if (cmdLine.hasOption(analyzerOpt)) {
//String className = (String) cmdLine.getValue(analyzerOpt);
//Class clazz = Class.forName(className);
//analyzer = (Analyzer) clazz.newInstance();
//}
//if (analyzer == null) {
// analyzer = new StandardAnalyzer();
//}
int gramSize = 1;
if (cmdLine.hasOption(gramSizeOpt)) {
gramSize = Integer.parseInt((String) cmdLine
.getValue(gramSizeOpt));
}
String testDirPath = (String) cmdLine.getValue(dirOpt);
File dir = new File(testDirPath);
File[] subdirs = dir.listFiles();
ResultAnalyzer resultAnalyzer = new ResultAnalyzer(model.getLabels());
if (subdirs != null) {
for (int loop = 0; loop < subdirs.length; loop++) {
String correctLabel = subdirs[loop].getName().split(".txt")[0];
BufferedReader fileReader = new BufferedReader(new InputStreamReader(
new FileInputStream(subdirs[loop].getPath()), encoding));
String line;
while ((line = fileReader.readLine()) != null) {
Map<String, List<String>> document = Model.generateNGrams(line, gramSize);
for (String labelName : document.keySet()) {
List<String> strings = document.get(labelName);
ClassifierResult classifiedLabel = classifier.classify(model,
strings.toArray(new String[strings.size()]),
defaultCat);
resultAnalyzer.addInstance(correctLabel, classifiedLabel);
}
}
log.info("{}\t{}\t{}/{}", new Object[] {
correctLabel,
resultAnalyzer.getConfusionMatrix().getAccuracy(correctLabel),
resultAnalyzer.getConfusionMatrix().getCorrect(correctLabel),
resultAnalyzer.getConfusionMatrix().getTotal(correctLabel)
});
}
}
| public static void main(String[] args) throws IOException,
ClassNotFoundException, IllegalAccessException, InstantiationException, OptionException {
final DefaultOptionBuilder obuilder = new DefaultOptionBuilder();
final ArgumentBuilder abuilder = new ArgumentBuilder();
final GroupBuilder gbuilder = new GroupBuilder();
Option pathOpt = obuilder.withLongName("path").withRequired(true).withArgument(
abuilder.withName("path").withMinimum(1).withMaximum(1).create()).
withDescription("The local file system path").withShortName("p").create();
Option dirOpt = obuilder.withLongName("testDir").withRequired(true).withArgument(
abuilder.withName("testDir").withMinimum(1).withMaximum(1).create()).
withDescription("The directory where test documents resides in").withShortName("t").create();
Option encodingOpt = obuilder.withLongName("encoding").withArgument(
abuilder.withName("encoding").withMinimum(1).withMaximum(1).create()).
withDescription("The file encoding. Defaults to UTF-8").withShortName("e").create();
Option analyzerOpt = obuilder.withLongName("analyzer").withArgument(
abuilder.withName("analyzer").withMinimum(1).withMaximum(1).create()).
withDescription("The Analyzer to use").withShortName("a").create();
Option defaultCatOpt = obuilder.withLongName("defaultCat").withArgument(
abuilder.withName("defaultCat").withMinimum(1).withMaximum(1).create()).
withDescription("The default category").withShortName("d").create();
Option gramSizeOpt = obuilder.withLongName("gramSize").withRequired(true).withArgument(
abuilder.withName("gramSize").withMinimum(1).withMaximum(1).create()).
withDescription("Size of the n-gram").withShortName("ng").create();
Option typeOpt = obuilder.withLongName("classifierType").withRequired(true).withArgument(
abuilder.withName("classifierType").withMinimum(1).withMaximum(1).create()).
withDescription("Type of classifier: bayes|cbayes").withShortName("type").create();
Group group = gbuilder.withName("Options").withOption(analyzerOpt).withOption(defaultCatOpt).withOption(dirOpt).withOption(encodingOpt).withOption(gramSizeOpt).withOption(pathOpt)
.withOption(typeOpt).create();
CommandLine cmdLine;
Parser parser = new Parser();
parser.setGroup(group);
cmdLine = parser.parse(args);
SequenceFileModelReader reader = new SequenceFileModelReader();
JobConf conf = new JobConf(TestClassifier.class);
Map<String, Path> modelPaths = new HashMap<String, Path>();
String modelBasePath = (String) cmdLine.getValue(pathOpt);
modelPaths.put("sigma_j", new Path(modelBasePath + "/trainer-weights/Sigma_j/part-*"));
modelPaths.put("sigma_k", new Path(modelBasePath + "/trainer-weights/Sigma_k/part-*"));
modelPaths.put("sigma_kSigma_j", new Path(modelBasePath + "/trainer-weights/Sigma_kSigma_j/part-*"));
modelPaths.put("thetaNormalizer", new Path(modelBasePath + "/trainer-thetaNormalizer/part-*"));
modelPaths.put("weight", new Path(modelBasePath + "/trainer-tfIdf/trainer-tfIdf/part-*"));
FileSystem fs = FileSystem.get(conf);
log.info("Loading model from: {}", modelPaths);
Model model;
Classifier classifier;
String classifierType = (String) cmdLine.getValue(typeOpt);
if (classifierType.equalsIgnoreCase("bayes")) {
log.info("Testing Bayes Classifier");
model = new BayesModel();
classifier = new BayesClassifier();
} else if (classifierType.equalsIgnoreCase("cbayes")) {
log.info("Testing Complementary Bayes Classifier");
model = new CBayesModel();
classifier = new CBayesClassifier();
} else {
throw new IllegalArgumentException("Unrecognized classifier type: " + classifierType);
}
model = reader.loadModel(model, fs, modelPaths, conf);
log.info("Done loading model: # labels: {}", model.getLabels().size());
log.info("Done generating Model");
String defaultCat = "unknown";
if (cmdLine.hasOption(defaultCatOpt)) {
defaultCat = (String) cmdLine.getValue(defaultCatOpt);
}
String encoding = "UTF-8";
if (cmdLine.hasOption(encodingOpt)) {
encoding = (String) cmdLine.getValue(encodingOpt);
}
//Analyzer analyzer = null;
//if (cmdLine.hasOption(analyzerOpt)) {
//String className = (String) cmdLine.getValue(analyzerOpt);
//Class clazz = Class.forName(className);
//analyzer = (Analyzer) clazz.newInstance();
//}
//if (analyzer == null) {
// analyzer = new StandardAnalyzer();
//}
int gramSize = 1;
if (cmdLine.hasOption(gramSizeOpt)) {
gramSize = Integer.parseInt((String) cmdLine
.getValue(gramSizeOpt));
}
String testDirPath = (String) cmdLine.getValue(dirOpt);
File dir = new File(testDirPath);
File[] subdirs = dir.listFiles();
ResultAnalyzer resultAnalyzer = new ResultAnalyzer(model.getLabels(), defaultCat);
if (subdirs != null) {
for (int loop = 0; loop < subdirs.length; loop++) {
String correctLabel = subdirs[loop].getName().split(".txt")[0];
BufferedReader fileReader = new BufferedReader(new InputStreamReader(
new FileInputStream(subdirs[loop].getPath()), encoding));
String line;
while ((line = fileReader.readLine()) != null) {
Map<String, List<String>> document = Model.generateNGrams(line, gramSize);
for (String labelName : document.keySet()) {
List<String> strings = document.get(labelName);
ClassifierResult classifiedLabel = classifier.classify(model,
strings.toArray(new String[strings.size()]),
defaultCat);
resultAnalyzer.addInstance(correctLabel, classifiedLabel);
}
}
log.info("{}\t{}\t{}/{}", new Object[] {
correctLabel,
resultAnalyzer.getConfusionMatrix().getAccuracy(correctLabel),
resultAnalyzer.getConfusionMatrix().getCorrect(correctLabel),
resultAnalyzer.getConfusionMatrix().getTotal(correctLabel)
});
}
}
|
public void test() {
BayesClassifier classifier = new BayesClassifier();
ClassifierResult result;
String[] document = new String[]{"aa", "ff"};
result = classifier.classify(model, document, "unknown");
assertTrue("category is null and it shouldn't be", result != null);
assertTrue(result + " is not equal to " + "e", result.getLabel().equals("e"));
document = new String[]{"ff"};
result = classifier.classify(model, document, "unknown");
assertTrue("category is null and it shouldn't be", result != null);
assertTrue(result + " is not equal to " + "unknown", result.getLabel().equals("unknown"));
document = new String[]{"cc"};
result = classifier.classify(model, document, "unknown");
assertTrue("category is null and it shouldn't be", result != null);
assertTrue(result + " is not equal to " + "d", result.getLabel().equals("d"));
}
| public void test() {
BayesClassifier classifier = new BayesClassifier();
ClassifierResult result;
String[] document = new String[]{"aa", "ff"};
result = classifier.classify(model, document, "unknown");
assertTrue("category is null and it shouldn't be", result != null);
assertTrue(result + " is not equal to " + "e", result.getLabel().equals("e"));
document = new String[]{"ff"};
result = classifier.classify(model, document, "unknown");
assertTrue("category is null and it shouldn't be", result != null);
assertTrue(result + " is not equal to " + "d", result.getLabel().equals("d"));//GSI: was unknown, but we now just pick the first cat
document = new String[]{"cc"};
result = classifier.classify(model, document, "unknown");
assertTrue("category is null and it shouldn't be", result != null);
assertTrue(result + " is not equal to " + "d", result.getLabel().equals("d"));
}
|
public void test() {
BayesClassifier classifier = new BayesClassifier();
ClassifierResult result;
String[] document = new String[]{"aa", "ff"};
result = classifier.classify(model, document, "unknown");
assertTrue("category is null and it shouldn't be", result != null);
assertTrue(result + " is not equal to " + "e", result.getLabel().equals("e"));
document = new String[]{"ff"};
result = classifier.classify(model, document, "unknown");
assertTrue("category is null and it shouldn't be", result != null);
assertTrue(result + " is not equal to " + "unknown", result.getLabel().equals("unknown"));
document = new String[]{"cc"};
result = classifier.classify(model, document, "unknown");
assertTrue("category is null and it shouldn't be", result != null);
assertTrue(result + " is not equal to " + "d", result.getLabel().equals("d"));
}
| public void test() {
BayesClassifier classifier = new BayesClassifier();
ClassifierResult result;
String[] document = new String[]{"aa", "ff"};
result = classifier.classify(model, document, "unknown");
assertTrue("category is null and it shouldn't be", result != null);
assertTrue(result + " is not equal to " + "e", result.getLabel().equals("e"));
document = new String[]{"ff"};
result = classifier.classify(model, document, "unknown");
assertTrue("category is null and it shouldn't be", result != null);
assertTrue(result + " is not equal to " + "d", result.getLabel().equals("d"));
document = new String[]{"cc"};
result = classifier.classify(model, document, "unknown");
assertTrue("category is null and it shouldn't be", result != null);
assertTrue(result + " is not equal to " + "d", result.getLabel().equals("d"));
}
|
public double documentProbability(Model model, String label, String[] document);
} | public double documentWeight(Model model, String label, String[] document);
} |
private void verify(AtomicReader r, int[][] idToOrds, BytesRef[] termsArray, BytesRef prefixRef) throws Exception {
final DocTermOrds dto = new DocTermOrds(r,
"field",
prefixRef,
Integer.MAX_VALUE,
_TestUtil.nextInt(random(), 2, 10));
final int[] docIDToID = FieldCache.DEFAULT.getInts(r, "id", false);
/*
for(int docID=0;docID<subR.maxDoc();docID++) {
System.out.println(" docID=" + docID + " id=" + docIDToID[docID]);
}
*/
if (VERBOSE) {
System.out.println("TEST: verify prefix=" + (prefixRef==null ? "null" : prefixRef.utf8ToString()));
System.out.println("TEST: all TERMS:");
TermsEnum allTE = MultiFields.getTerms(r, "field").iterator(null);
int ord = 0;
while(allTE.next() != null) {
System.out.println(" ord=" + (ord++) + " term=" + allTE.term().utf8ToString());
}
}
//final TermsEnum te = subR.fields().terms("field").iterator();
final TermsEnum te = dto.getOrdTermsEnum(r);
if (te == null) {
if (prefixRef == null) {
assertNull(MultiFields.getTerms(r, "field"));
} else {
Terms terms = MultiFields.getTerms(r, "field");
if (terms != null) {
TermsEnum termsEnum = terms.iterator(null);
TermsEnum.SeekStatus result = termsEnum.seekCeil(prefixRef, false);
if (result != TermsEnum.SeekStatus.END) {
assertFalse("term=" + termsEnum.term().utf8ToString() + " matches prefix=" + prefixRef.utf8ToString(), StringHelper.startsWith(termsEnum.term(), prefixRef));
} else {
// ok
}
} else {
// ok
}
}
return;
}
if (VERBOSE) {
System.out.println("TEST: TERMS:");
te.seekExact(0);
while(true) {
System.out.println(" ord=" + te.ord() + " term=" + te.term().utf8ToString());
if (te.next() == null) {
break;
}
}
}
TermOrdsIterator iter = null;
final int[] buffer = new int[5];
for(int docID=0;docID<r.maxDoc();docID++) {
if (VERBOSE) {
System.out.println("TEST: docID=" + docID + " of " + r.maxDoc() + " (id=" + docIDToID[docID] + ")");
}
iter = dto.lookup(docID, iter);
final int[] answers = idToOrds[docIDToID[docID]];
int upto = 0;
while(true) {
final int chunk = iter.read(buffer);
for(int idx=0;idx<chunk;idx++) {
te.seekExact((long) buffer[idx]);
final BytesRef expected = termsArray[answers[upto++]];
if (VERBOSE) {
System.out.println(" exp=" + expected.utf8ToString() + " actual=" + te.term().utf8ToString());
}
assertEquals("expected=" + expected.utf8ToString() + " actual=" + te.term().utf8ToString() + " ord=" + buffer[idx], expected, te.term());
}
if (chunk < buffer.length) {
assertEquals(answers.length, upto);
break;
}
}
}
}
| private void verify(AtomicReader r, int[][] idToOrds, BytesRef[] termsArray, BytesRef prefixRef) throws Exception {
final DocTermOrds dto = new DocTermOrds(r,
"field",
prefixRef,
Integer.MAX_VALUE,
_TestUtil.nextInt(random(), 2, 10));
final int[] docIDToID = FieldCache.DEFAULT.getInts(r, "id", false);
/*
for(int docID=0;docID<subR.maxDoc();docID++) {
System.out.println(" docID=" + docID + " id=" + docIDToID[docID]);
}
*/
if (VERBOSE) {
System.out.println("TEST: verify prefix=" + (prefixRef==null ? "null" : prefixRef.utf8ToString()));
System.out.println("TEST: all TERMS:");
TermsEnum allTE = MultiFields.getTerms(r, "field").iterator(null);
int ord = 0;
while(allTE.next() != null) {
System.out.println(" ord=" + (ord++) + " term=" + allTE.term().utf8ToString());
}
}
//final TermsEnum te = subR.fields().terms("field").iterator();
final TermsEnum te = dto.getOrdTermsEnum(r);
if (dto.numTerms() == 0) {
if (prefixRef == null) {
assertNull(MultiFields.getTerms(r, "field"));
} else {
Terms terms = MultiFields.getTerms(r, "field");
if (terms != null) {
TermsEnum termsEnum = terms.iterator(null);
TermsEnum.SeekStatus result = termsEnum.seekCeil(prefixRef, false);
if (result != TermsEnum.SeekStatus.END) {
assertFalse("term=" + termsEnum.term().utf8ToString() + " matches prefix=" + prefixRef.utf8ToString(), StringHelper.startsWith(termsEnum.term(), prefixRef));
} else {
// ok
}
} else {
// ok
}
}
return;
}
if (VERBOSE) {
System.out.println("TEST: TERMS:");
te.seekExact(0);
while(true) {
System.out.println(" ord=" + te.ord() + " term=" + te.term().utf8ToString());
if (te.next() == null) {
break;
}
}
}
TermOrdsIterator iter = null;
final int[] buffer = new int[5];
for(int docID=0;docID<r.maxDoc();docID++) {
if (VERBOSE) {
System.out.println("TEST: docID=" + docID + " of " + r.maxDoc() + " (id=" + docIDToID[docID] + ")");
}
iter = dto.lookup(docID, iter);
final int[] answers = idToOrds[docIDToID[docID]];
int upto = 0;
while(true) {
final int chunk = iter.read(buffer);
for(int idx=0;idx<chunk;idx++) {
te.seekExact((long) buffer[idx]);
final BytesRef expected = termsArray[answers[upto++]];
if (VERBOSE) {
System.out.println(" exp=" + expected.utf8ToString() + " actual=" + te.term().utf8ToString());
}
assertEquals("expected=" + expected.utf8ToString() + " actual=" + te.term().utf8ToString() + " ord=" + buffer[idx], expected, te.term());
}
if (chunk < buffer.length) {
assertEquals(answers.length, upto);
break;
}
}
}
}
|
public String getNormFileName(int number) {
if (hasSeparateNorms(number)) {
return IndexFileNames.fileNameFromGeneration(name, "s" + number, normGen.get(number));
} else {
// single file for all norms
return IndexFileNames.fileNameFromGeneration(name, IndexFileNames.NORMS_EXTENSION, WITHOUT_GEN);
}
}
| public String getNormFileName(int number) {
if (hasSeparateNorms(number)) {
return IndexFileNames.fileNameFromGeneration(name, IndexFileNames.SEPARATE_NORMS_EXTENSION + number, normGen.get(number));
} else {
// single file for all norms
return IndexFileNames.fileNameFromGeneration(name, IndexFileNames.NORMS_EXTENSION, WITHOUT_GEN);
}
}
|
public Session(String[] arguments) throws IllegalArgumentException
{
float STDev = 0.1f;
CommandLineParser parser = new PosixParser();
try
{
CommandLine cmd = parser.parse(availableOptions, arguments);
if (cmd.getArgs().length > 0)
{
System.err.println("Application does not allow arbitrary arguments: " + StringUtils.join(cmd.getArgList(), ", "));
System.exit(1);
}
if (cmd.hasOption("h"))
throw new IllegalArgumentException("help");
if (cmd.hasOption("n"))
numKeys = Integer.parseInt(cmd.getOptionValue("n"));
if (cmd.hasOption("F"))
numDifferentKeys = Integer.parseInt(cmd.getOptionValue("F"));
else
numDifferentKeys = numKeys;
if (cmd.hasOption("N"))
skipKeys = Float.parseFloat(cmd.getOptionValue("N"));
if (cmd.hasOption("t"))
threads = Integer.parseInt(cmd.getOptionValue("t"));
if (cmd.hasOption("c"))
columns = Integer.parseInt(cmd.getOptionValue("c"));
if (cmd.hasOption("S"))
columnSize = Integer.parseInt(cmd.getOptionValue("S"));
if (cmd.hasOption("C"))
cardinality = Integer.parseInt(cmd.getOptionValue("C"));
if (cmd.hasOption("d"))
nodes = cmd.getOptionValue("d").split(",");
if (cmd.hasOption("D"))
{
try
{
String node = null;
List<String> tmpNodes = new ArrayList<String>();
BufferedReader in = new BufferedReader(new InputStreamReader(new FileInputStream(cmd.getOptionValue("D"))));
while ((node = in.readLine()) != null)
{
if (node.length() > 0)
tmpNodes.add(node);
}
nodes = tmpNodes.toArray(new String[tmpNodes.size()]);
in.close();
}
catch(IOException ioe)
{
throw new RuntimeException(ioe);
}
}
if (cmd.hasOption("s"))
STDev = Float.parseFloat(cmd.getOptionValue("s"));
if (cmd.hasOption("r"))
random = true;
outFileName = (cmd.hasOption("f")) ? cmd.getOptionValue("f") : null;
if (cmd.hasOption("p"))
port = Integer.parseInt(cmd.getOptionValue("p"));
if (cmd.hasOption("m"))
unframed = Boolean.parseBoolean(cmd.getOptionValue("m"));
if (cmd.hasOption("o"))
operation = Stress.Operations.valueOf(cmd.getOptionValue("o").toUpperCase());
if (cmd.hasOption("u"))
superColumns = Integer.parseInt(cmd.getOptionValue("u"));
if (cmd.hasOption("y"))
columnFamilyType = ColumnFamilyType.valueOf(cmd.getOptionValue("y"));
if (cmd.hasOption("K"))
{
retryTimes = Integer.valueOf(cmd.getOptionValue("K"));
if (retryTimes <= 0)
{
throw new RuntimeException("--keep-trying option value should be > 0");
}
}
if (cmd.hasOption("k"))
{
retryTimes = 1;
ignoreErrors = true;
}
if (cmd.hasOption("i"))
progressInterval = Integer.parseInt(cmd.getOptionValue("i"));
if (cmd.hasOption("g"))
keysPerCall = Integer.parseInt(cmd.getOptionValue("g"));
if (cmd.hasOption("e"))
consistencyLevel = ConsistencyLevel.valueOf(cmd.getOptionValue("e").toUpperCase());
if (cmd.hasOption("x"))
indexType = IndexType.valueOf(cmd.getOptionValue("x").toUpperCase());
if (cmd.hasOption("R"))
replicationStrategy = cmd.getOptionValue("R");
if (cmd.hasOption("l"))
replicationStrategyOptions.put("replication_factor", String.valueOf(Integer.parseInt(cmd.getOptionValue("l"))));
else if (replicationStrategy.endsWith("SimpleStrategy"))
replicationStrategyOptions.put("replication_factor", "1");
if (cmd.hasOption("O"))
{
String[] pairs = StringUtils.split(cmd.getOptionValue("O"), ',');
for (String pair : pairs)
{
String[] keyAndValue = StringUtils.split(pair, ':');
if (keyAndValue.length != 2)
throw new RuntimeException("Invalid --strategy-properties value.");
replicationStrategyOptions.put(keyAndValue[0], keyAndValue[1]);
}
}
if (cmd.hasOption("W"))
replicateOnWrite = false;
if (cmd.hasOption("I"))
compression = cmd.getOptionValue("I");
averageSizeValues = cmd.hasOption("V");
try
{
sendToDaemon = cmd.hasOption("send-to")
? InetAddress.getByName(cmd.getOptionValue("send-to"))
: null;
}
catch (UnknownHostException e)
{
throw new RuntimeException(e);
}
if (cmd.hasOption("Q"))
{
AbstractType comparator = TypeParser.parse(DEFAULT_COMPARATOR);
String[] names = StringUtils.split(cmd.getOptionValue("Q"), ",");
columnNames = new ArrayList<ByteBuffer>(names.length);
for (String columnName : names)
columnNames.add(comparator.fromString(columnName));
}
else
{
columnNames = null;
}
if (cmd.hasOption("Z"))
{
compactionStrategy = cmd.getOptionValue("Z");
try
{
// validate compaction strategy class
CFMetaData.createCompactionSrategy(compactionStrategy);
}
catch (ConfigurationException e)
{
System.err.println(e.getMessage());
System.exit(1);
}
}
if (cmd.hasOption("U"))
{
AbstractType parsed = null;
try
{
parsed = TypeParser.parse(cmd.getOptionValue("U"));
}
catch (ConfigurationException e)
{
System.err.println(e.getMessage());
System.exit(1);
}
comparator = cmd.getOptionValue("U");
timeUUIDComparator = parsed instanceof TimeUUIDType;
if (!(parsed instanceof TimeUUIDType || parsed instanceof AsciiType || parsed instanceof UTF8Type))
{
System.err.println("Currently supported types are: TimeUUIDType, AsciiType, UTF8Type.");
System.exit(1);
}
}
else
{
comparator = null;
timeUUIDComparator = false;
}
}
catch (ParseException e)
{
throw new IllegalArgumentException(e.getMessage(), e);
}
catch (ConfigurationException e)
{
throw new IllegalStateException(e.getMessage(), e);
}
mean = numDifferentKeys / 2;
sigma = numDifferentKeys * STDev;
operations = new AtomicInteger();
keys = new AtomicInteger();
latency = new AtomicLong();
}
| public Session(String[] arguments) throws IllegalArgumentException
{
float STDev = 0.1f;
CommandLineParser parser = new PosixParser();
try
{
CommandLine cmd = parser.parse(availableOptions, arguments);
if (cmd.getArgs().length > 0)
{
System.err.println("Application does not allow arbitrary arguments: " + StringUtils.join(cmd.getArgList(), ", "));
System.exit(1);
}
if (cmd.hasOption("h"))
throw new IllegalArgumentException("help");
if (cmd.hasOption("n"))
numKeys = Integer.parseInt(cmd.getOptionValue("n"));
if (cmd.hasOption("F"))
numDifferentKeys = Integer.parseInt(cmd.getOptionValue("F"));
else
numDifferentKeys = numKeys;
if (cmd.hasOption("N"))
skipKeys = Float.parseFloat(cmd.getOptionValue("N"));
if (cmd.hasOption("t"))
threads = Integer.parseInt(cmd.getOptionValue("t"));
if (cmd.hasOption("c"))
columns = Integer.parseInt(cmd.getOptionValue("c"));
if (cmd.hasOption("S"))
columnSize = Integer.parseInt(cmd.getOptionValue("S"));
if (cmd.hasOption("C"))
cardinality = Integer.parseInt(cmd.getOptionValue("C"));
if (cmd.hasOption("d"))
nodes = cmd.getOptionValue("d").split(",");
if (cmd.hasOption("D"))
{
try
{
String node = null;
List<String> tmpNodes = new ArrayList<String>();
BufferedReader in = new BufferedReader(new InputStreamReader(new FileInputStream(cmd.getOptionValue("D"))));
while ((node = in.readLine()) != null)
{
if (node.length() > 0)
tmpNodes.add(node);
}
nodes = tmpNodes.toArray(new String[tmpNodes.size()]);
in.close();
}
catch(IOException ioe)
{
throw new RuntimeException(ioe);
}
}
if (cmd.hasOption("s"))
STDev = Float.parseFloat(cmd.getOptionValue("s"));
if (cmd.hasOption("r"))
random = true;
outFileName = (cmd.hasOption("f")) ? cmd.getOptionValue("f") : null;
if (cmd.hasOption("p"))
port = Integer.parseInt(cmd.getOptionValue("p"));
if (cmd.hasOption("m"))
unframed = Boolean.parseBoolean(cmd.getOptionValue("m"));
if (cmd.hasOption("o"))
operation = Stress.Operations.valueOf(cmd.getOptionValue("o").toUpperCase());
if (cmd.hasOption("u"))
superColumns = Integer.parseInt(cmd.getOptionValue("u"));
if (cmd.hasOption("y"))
columnFamilyType = ColumnFamilyType.valueOf(cmd.getOptionValue("y"));
if (cmd.hasOption("K"))
{
retryTimes = Integer.valueOf(cmd.getOptionValue("K"));
if (retryTimes <= 0)
{
throw new RuntimeException("--keep-trying option value should be > 0");
}
}
if (cmd.hasOption("k"))
{
retryTimes = 1;
ignoreErrors = true;
}
if (cmd.hasOption("i"))
progressInterval = Integer.parseInt(cmd.getOptionValue("i"));
if (cmd.hasOption("g"))
keysPerCall = Integer.parseInt(cmd.getOptionValue("g"));
if (cmd.hasOption("e"))
consistencyLevel = ConsistencyLevel.valueOf(cmd.getOptionValue("e").toUpperCase());
if (cmd.hasOption("x"))
indexType = IndexType.valueOf(cmd.getOptionValue("x").toUpperCase());
if (cmd.hasOption("R"))
replicationStrategy = cmd.getOptionValue("R");
if (cmd.hasOption("l"))
replicationStrategyOptions.put("replication_factor", String.valueOf(Integer.parseInt(cmd.getOptionValue("l"))));
else if (replicationStrategy.endsWith("SimpleStrategy"))
replicationStrategyOptions.put("replication_factor", "1");
if (cmd.hasOption("O"))
{
String[] pairs = StringUtils.split(cmd.getOptionValue("O"), ',');
for (String pair : pairs)
{
String[] keyAndValue = StringUtils.split(pair, ':');
if (keyAndValue.length != 2)
throw new RuntimeException("Invalid --strategy-properties value.");
replicationStrategyOptions.put(keyAndValue[0], keyAndValue[1]);
}
}
if (cmd.hasOption("W"))
replicateOnWrite = false;
if (cmd.hasOption("I"))
compression = cmd.getOptionValue("I");
averageSizeValues = cmd.hasOption("V");
try
{
sendToDaemon = cmd.hasOption("send-to")
? InetAddress.getByName(cmd.getOptionValue("send-to"))
: null;
}
catch (UnknownHostException e)
{
throw new RuntimeException(e);
}
if (cmd.hasOption("Q"))
{
AbstractType comparator = TypeParser.parse(DEFAULT_COMPARATOR);
String[] names = StringUtils.split(cmd.getOptionValue("Q"), ",");
columnNames = new ArrayList<ByteBuffer>(names.length);
for (String columnName : names)
columnNames.add(comparator.fromString(columnName));
}
else
{
columnNames = null;
}
if (cmd.hasOption("Z"))
{
compactionStrategy = cmd.getOptionValue("Z");
try
{
// validate compaction strategy class
CFMetaData.createCompactionStrategy(compactionStrategy);
}
catch (ConfigurationException e)
{
System.err.println(e.getMessage());
System.exit(1);
}
}
if (cmd.hasOption("U"))
{
AbstractType parsed = null;
try
{
parsed = TypeParser.parse(cmd.getOptionValue("U"));
}
catch (ConfigurationException e)
{
System.err.println(e.getMessage());
System.exit(1);
}
comparator = cmd.getOptionValue("U");
timeUUIDComparator = parsed instanceof TimeUUIDType;
if (!(parsed instanceof TimeUUIDType || parsed instanceof AsciiType || parsed instanceof UTF8Type))
{
System.err.println("Currently supported types are: TimeUUIDType, AsciiType, UTF8Type.");
System.exit(1);
}
}
else
{
comparator = null;
timeUUIDComparator = false;
}
}
catch (ParseException e)
{
throw new IllegalArgumentException(e.getMessage(), e);
}
catch (ConfigurationException e)
{
throw new IllegalStateException(e.getMessage(), e);
}
mean = numDifferentKeys / 2;
sigma = numDifferentKeys * STDev;
operations = new AtomicInteger();
keys = new AtomicInteger();
latency = new AtomicLong();
}
|
public static String updateJ(String json, SolrParams args) throws Exception {
SolrCore core = h.getCore();
DirectSolrConnection connection = new DirectSolrConnection(core);
SolrRequestHandler handler = core.getRequestHandler("/udate/json");
if (handler == null) {
handler = new JsonUpdateRequestHandler();
handler.init(null);
}
return connection.request(handler, args, json);
}
| public static String updateJ(String json, SolrParams args) throws Exception {
SolrCore core = h.getCore();
DirectSolrConnection connection = new DirectSolrConnection(core);
SolrRequestHandler handler = core.getRequestHandler("/update/json");
if (handler == null) {
handler = new JsonUpdateRequestHandler();
handler.init(null);
}
return connection.request(handler, args, json);
}
|
public void testIndexCreate() throws IOException, ConfigurationException, InterruptedException, ExecutionException
{
Table table = Table.open("Keyspace1");
// create a row and update the birthdate value, test that the index query fetches the new version
RowMutation rm;
rm = new RowMutation("Keyspace1", ByteBufferUtil.bytes("k1"));
rm.add(new QueryPath("Indexed2", null, ByteBufferUtil.bytes("birthdate")), ByteBufferUtil.bytes(1L), 1);
rm.apply();
ColumnFamilyStore cfs = table.getColumnFamilyStore("Indexed2");
ColumnDefinition old = cfs.metadata.getColumn_metadata().get(ByteBufferUtil.bytes("birthdate"));
ColumnDefinition cd = new ColumnDefinition(old.name, old.validator.getClass().getName(), IndexType.KEYS, "birthdate_index");
Future<?> future = cfs.addIndex(cd);
future.get();
// we had a bug (CASSANDRA-2244) where index would get created but not flushed -- check for that
assert cfs.getIndexedColumnFamilyStore(cd.name).getSSTables().size() > 0;
queryBirthdate(table);
// validate that drop clears it out & rebuild works (CASSANDRA-2320)
ColumnFamilyStore indexedCfs = cfs.getIndexedColumnFamilyStore(ByteBufferUtil.bytes("birthdate"));
cfs.removeIndex(ByteBufferUtil.bytes("birthdate"));
assert !indexedCfs.isIndexBuilt();
// rebuild & re-query
future = cfs.addIndex(cd);
future.get();
queryBirthdate(table);
}
| public void testIndexCreate() throws IOException, ConfigurationException, InterruptedException, ExecutionException
{
Table table = Table.open("Keyspace1");
// create a row and update the birthdate value, test that the index query fetches the new version
RowMutation rm;
rm = new RowMutation("Keyspace1", ByteBufferUtil.bytes("k1"));
rm.add(new QueryPath("Indexed2", null, ByteBufferUtil.bytes("birthdate")), ByteBufferUtil.bytes(1L), 1);
rm.apply();
ColumnFamilyStore cfs = table.getColumnFamilyStore("Indexed2");
ColumnDefinition old = cfs.metadata.getColumn_metadata().get(ByteBufferUtil.bytes("birthdate"));
ColumnDefinition cd = new ColumnDefinition(old.name, old.validator, IndexType.KEYS, "birthdate_index");
Future<?> future = cfs.addIndex(cd);
future.get();
// we had a bug (CASSANDRA-2244) where index would get created but not flushed -- check for that
assert cfs.getIndexedColumnFamilyStore(cd.name).getSSTables().size() > 0;
queryBirthdate(table);
// validate that drop clears it out & rebuild works (CASSANDRA-2320)
ColumnFamilyStore indexedCfs = cfs.getIndexedColumnFamilyStore(ByteBufferUtil.bytes("birthdate"));
cfs.removeIndex(ByteBufferUtil.bytes("birthdate"));
assert !indexedCfs.isIndexBuilt();
// rebuild & re-query
future = cfs.addIndex(cd);
future.get();
queryBirthdate(table);
}
|
public void reload()
{
// metadata object has been mutated directly. make all the members jibe with new settings.
// only update these runtime-modifiable settings if they have not been modified.
if (!minCompactionThreshold.isModified())
for (ColumnFamilyStore cfs : concatWithIndexes())
cfs.minCompactionThreshold = new DefaultInteger(metadata.getMinCompactionThreshold());
if (!maxCompactionThreshold.isModified())
for (ColumnFamilyStore cfs : concatWithIndexes())
cfs.maxCompactionThreshold = new DefaultInteger(metadata.getMaxCompactionThreshold());
if (!memtime.isModified())
for (ColumnFamilyStore cfs : concatWithIndexes())
cfs.memtime = new DefaultInteger(metadata.getMemtableFlushAfterMins());
if (!memsize.isModified())
for (ColumnFamilyStore cfs : concatWithIndexes())
cfs.memsize = new DefaultInteger(metadata.getMemtableThroughputInMb());
if (!memops.isModified())
for (ColumnFamilyStore cfs : concatWithIndexes())
cfs.memops = new DefaultDouble(metadata.getMemtableOperationsInMillions());
if (!rowCacheSaveInSeconds.isModified())
rowCacheSaveInSeconds = new DefaultInteger(metadata.getRowCacheSavePeriodInSeconds());
if (!keyCacheSaveInSeconds.isModified())
keyCacheSaveInSeconds = new DefaultInteger(metadata.getKeyCacheSavePeriodInSeconds());
ssTables.updateCacheSizes();
scheduleCacheSaving(rowCacheSaveInSeconds.value(), keyCacheSaveInSeconds.value());
// figure out what needs to be added and dropped.
// future: if/when we have modifiable settings for secondary indexes, they'll need to be handled here.
for (ByteBuffer indexedColumn : indexedColumns.keySet())
{
if (!metadata.getColumn_metadata().containsKey(indexedColumn))
removeIndex(indexedColumn);
}
for (ColumnDefinition cdef : metadata.getColumn_metadata().values())
if (cdef.getIndexType() != null && !indexedColumns.containsKey(cdef.name))
addIndex(cdef);
}
void removeIndex(ByteBuffer indexedColumn)
{
ColumnFamilyStore indexCfs = indexedColumns.remove(indexedColumn);
if (indexCfs == null)
{
logger.debug("index {} already removed; ignoring", ByteBufferUtil.bytesToHex(indexedColumn));
return;
}
indexCfs.unregisterMBean();
SystemTable.setIndexRemoved(metadata.tableName, indexCfs.columnFamily);
indexCfs.removeAllSSTables();
}
| public void reload()
{
// metadata object has been mutated directly. make all the members jibe with new settings.
// only update these runtime-modifiable settings if they have not been modified.
if (!minCompactionThreshold.isModified())
for (ColumnFamilyStore cfs : concatWithIndexes())
cfs.minCompactionThreshold = new DefaultInteger(metadata.getMinCompactionThreshold());
if (!maxCompactionThreshold.isModified())
for (ColumnFamilyStore cfs : concatWithIndexes())
cfs.maxCompactionThreshold = new DefaultInteger(metadata.getMaxCompactionThreshold());
if (!memtime.isModified())
for (ColumnFamilyStore cfs : concatWithIndexes())
cfs.memtime = new DefaultInteger(metadata.getMemtableFlushAfterMins());
if (!memsize.isModified())
for (ColumnFamilyStore cfs : concatWithIndexes())
cfs.memsize = new DefaultInteger(metadata.getMemtableThroughputInMb());
if (!memops.isModified())
for (ColumnFamilyStore cfs : concatWithIndexes())
cfs.memops = new DefaultDouble(metadata.getMemtableOperationsInMillions());
if (!rowCacheSaveInSeconds.isModified())
rowCacheSaveInSeconds = new DefaultInteger(metadata.getRowCacheSavePeriodInSeconds());
if (!keyCacheSaveInSeconds.isModified())
keyCacheSaveInSeconds = new DefaultInteger(metadata.getKeyCacheSavePeriodInSeconds());
ssTables.updateCacheSizes();
scheduleCacheSaving(rowCacheSaveInSeconds.value(), keyCacheSaveInSeconds.value());
// figure out what needs to be added and dropped.
// future: if/when we have modifiable settings for secondary indexes, they'll need to be handled here.
for (ByteBuffer indexedColumn : indexedColumns.keySet())
{
if (!metadata.getColumn_metadata().containsKey(indexedColumn))
removeIndex(indexedColumn);
}
for (ColumnDefinition cdef : metadata.getColumn_metadata().values())
if (cdef.getIndexType() != null && !indexedColumns.containsKey(cdef.name))
addIndex(cdef);
}
void removeIndex(ByteBuffer indexedColumn)
{
ColumnFamilyStore indexCfs = indexedColumns.remove(indexedColumn);
if (indexCfs == null)
{
logger.debug("index {} already removed; ignoring", ByteBufferUtil.bytesToHex(indexedColumn));
return;
}
indexCfs.unregisterMBean();
SystemTable.setIndexRemoved(metadata.ksName, indexCfs.columnFamily);
indexCfs.removeAllSSTables();
}
|
public AllSimilarItemsCandidateItemsStrategy(ItemSimilarity similarity) {
Preconditions.checkArgument(similarity != null, "similarity is null");
this.similarity = similarity;
}
@Override
FastIDSet doGetCandidateItems(long[] preferredItemIDs, DataModel dataModel) throws TasteException {
FastIDSet candidateItemIDs = new FastIDSet();
for (long itemID : preferredItemIDs) {
candidateItemIDs.addAll(similarity.allSimilarItemIDs(itemID));
}
candidateItemIDs.removeAll(preferredItemIDs);
return candidateItemIDs;
}
} | protected FastIDSet doGetCandidateItems(long[] preferredItemIDs, DataModel dataModel) throws TasteException {
FastIDSet candidateItemIDs = new FastIDSet();
for (long itemID : preferredItemIDs) {
candidateItemIDs.addAll(similarity.allSimilarItemIDs(itemID));
}
candidateItemIDs.removeAll(preferredItemIDs);
return candidateItemIDs;
}
} |
public void testMissingField() throws Exception {
String fieldName = "field1";
Directory rd1 = newDirectory();
RandomIndexWriter w1 = new RandomIndexWriter(random(), rd1);
Document doc = new Document();
doc.add(newStringField(fieldName, "content1", Field.Store.YES));
w1.addDocument(doc);
IndexReader reader1 = w1.getReader();
w1.close();
fieldName = "field2";
Directory rd2 = newDirectory();
RandomIndexWriter w2 = new RandomIndexWriter(random(), rd2);
doc = new Document();
doc.add(newStringField(fieldName, "content2", Field.Store.YES));
w2.addDocument(doc);
IndexReader reader2 = w2.getReader();
w2.close();
TermsFilter tf = new TermsFilter();
tf.addTerm(new Term(fieldName, "content1"));
MultiReader multi = new MultiReader(reader1, reader2);
for (AtomicReaderContext context : multi.getTopReaderContext().leaves()) {
FixedBitSet bits = (FixedBitSet) tf.getDocIdSet(context, context.reader().getLiveDocs());
assertTrue("Must be >= 0", bits.cardinality() >= 0);
}
multi.close();
reader1.close();
reader2.close();
rd1.close();
rd2.close();
}
| public void testMissingField() throws Exception {
String fieldName = "field1";
Directory rd1 = newDirectory();
RandomIndexWriter w1 = new RandomIndexWriter(random(), rd1);
Document doc = new Document();
doc.add(newStringField(fieldName, "content1", Field.Store.YES));
w1.addDocument(doc);
IndexReader reader1 = w1.getReader();
w1.close();
fieldName = "field2";
Directory rd2 = newDirectory();
RandomIndexWriter w2 = new RandomIndexWriter(random(), rd2);
doc = new Document();
doc.add(newStringField(fieldName, "content2", Field.Store.YES));
w2.addDocument(doc);
IndexReader reader2 = w2.getReader();
w2.close();
TermsFilter tf = new TermsFilter();
tf.addTerm(new Term(fieldName, "content1"));
MultiReader multi = new MultiReader(reader1, reader2);
for (AtomicReaderContext context : multi.leaves()) {
FixedBitSet bits = (FixedBitSet) tf.getDocIdSet(context, context.reader().getLiveDocs());
assertTrue("Must be >= 0", bits.cardinality() >= 0);
}
multi.close();
reader1.close();
reader2.close();
rd1.close();
rd2.close();
}
|
public final void indexDictionary(Dictionary dict, IndexWriterConfig config, boolean fullMerge) throws IOException {
synchronized (modifyCurrentIndexLock) {
ensureOpen();
final Directory dir = this.spellIndex;
final IndexWriter writer = new IndexWriter(dir, config);
IndexSearcher indexSearcher = obtainSearcher();
final List<TermsEnum> termsEnums = new ArrayList<TermsEnum>();
final IndexReader reader = searcher.getIndexReader();
if (reader.maxDoc() > 0) {
for (final AtomicReaderContext ctx : reader.getTopReaderContext().leaves()) {
Terms terms = ctx.reader().terms(F_WORD);
if (terms != null)
termsEnums.add(terms.iterator(null));
}
}
boolean isEmpty = termsEnums.isEmpty();
try {
BytesRefIterator iter = dict.getWordsIterator();
BytesRef currentTerm;
terms: while ((currentTerm = iter.next()) != null) {
String word = currentTerm.utf8ToString();
int len = word.length();
if (len < 3) {
continue; // too short we bail but "too long" is fine...
}
if (!isEmpty) {
for (TermsEnum te : termsEnums) {
if (te.seekExact(currentTerm, false)) {
continue terms;
}
}
}
// ok index the word
Document doc = createDocument(word, getMin(len), getMax(len));
writer.addDocument(doc);
}
} finally {
releaseSearcher(indexSearcher);
}
if (fullMerge) {
writer.forceMerge(1);
}
// close writer
writer.close();
// TODO: this isn't that great, maybe in the future SpellChecker should take
// IWC in its ctor / keep its writer open?
// also re-open the spell index to see our own changes when the next suggestion
// is fetched:
swapSearcher(dir);
}
}
| public final void indexDictionary(Dictionary dict, IndexWriterConfig config, boolean fullMerge) throws IOException {
synchronized (modifyCurrentIndexLock) {
ensureOpen();
final Directory dir = this.spellIndex;
final IndexWriter writer = new IndexWriter(dir, config);
IndexSearcher indexSearcher = obtainSearcher();
final List<TermsEnum> termsEnums = new ArrayList<TermsEnum>();
final IndexReader reader = searcher.getIndexReader();
if (reader.maxDoc() > 0) {
for (final AtomicReaderContext ctx : reader.leaves()) {
Terms terms = ctx.reader().terms(F_WORD);
if (terms != null)
termsEnums.add(terms.iterator(null));
}
}
boolean isEmpty = termsEnums.isEmpty();
try {
BytesRefIterator iter = dict.getWordsIterator();
BytesRef currentTerm;
terms: while ((currentTerm = iter.next()) != null) {
String word = currentTerm.utf8ToString();
int len = word.length();
if (len < 3) {
continue; // too short we bail but "too long" is fine...
}
if (!isEmpty) {
for (TermsEnum te : termsEnums) {
if (te.seekExact(currentTerm, false)) {
continue terms;
}
}
}
// ok index the word
Document doc = createDocument(word, getMin(len), getMax(len));
writer.addDocument(doc);
}
} finally {
releaseSearcher(indexSearcher);
}
if (fullMerge) {
writer.forceMerge(1);
}
// close writer
writer.close();
// TODO: this isn't that great, maybe in the future SpellChecker should take
// IWC in its ctor / keep its writer open?
// also re-open the spell index to see our own changes when the next suggestion
// is fetched:
swapSearcher(dir);
}
}
|
public void testTermUTF16SortOrder() throws Throwable {
Random rnd = random();
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(rnd, dir);
Document d = new Document();
// Single segment
Field f = newStringField("f", "", Field.Store.NO);
d.add(f);
char[] chars = new char[2];
final Set<String> allTerms = new HashSet<String>();
int num = atLeast(200);
for (int i = 0; i < num; i++) {
final String s;
if (rnd.nextBoolean()) {
// Single char
if (rnd.nextBoolean()) {
// Above surrogates
chars[0] = (char) getInt(rnd, 1+UnicodeUtil.UNI_SUR_LOW_END, 0xffff);
} else {
// Below surrogates
chars[0] = (char) getInt(rnd, 0, UnicodeUtil.UNI_SUR_HIGH_START-1);
}
s = new String(chars, 0, 1);
} else {
// Surrogate pair
chars[0] = (char) getInt(rnd, UnicodeUtil.UNI_SUR_HIGH_START, UnicodeUtil.UNI_SUR_HIGH_END);
assertTrue(((int) chars[0]) >= UnicodeUtil.UNI_SUR_HIGH_START && ((int) chars[0]) <= UnicodeUtil.UNI_SUR_HIGH_END);
chars[1] = (char) getInt(rnd, UnicodeUtil.UNI_SUR_LOW_START, UnicodeUtil.UNI_SUR_LOW_END);
s = new String(chars, 0, 2);
}
allTerms.add(s);
f.setStringValue(s);
writer.addDocument(d);
if ((1+i) % 42 == 0) {
writer.commit();
}
}
IndexReader r = writer.getReader();
// Test each sub-segment
for (AtomicReaderContext ctx : r.getTopReaderContext().leaves()) {
checkTermsOrder(ctx.reader(), allTerms, false);
}
checkTermsOrder(r, allTerms, true);
// Test multi segment
r.close();
writer.forceMerge(1);
// Test single segment
r = writer.getReader();
checkTermsOrder(r, allTerms, true);
r.close();
writer.close();
dir.close();
}
| public void testTermUTF16SortOrder() throws Throwable {
Random rnd = random();
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(rnd, dir);
Document d = new Document();
// Single segment
Field f = newStringField("f", "", Field.Store.NO);
d.add(f);
char[] chars = new char[2];
final Set<String> allTerms = new HashSet<String>();
int num = atLeast(200);
for (int i = 0; i < num; i++) {
final String s;
if (rnd.nextBoolean()) {
// Single char
if (rnd.nextBoolean()) {
// Above surrogates
chars[0] = (char) getInt(rnd, 1+UnicodeUtil.UNI_SUR_LOW_END, 0xffff);
} else {
// Below surrogates
chars[0] = (char) getInt(rnd, 0, UnicodeUtil.UNI_SUR_HIGH_START-1);
}
s = new String(chars, 0, 1);
} else {
// Surrogate pair
chars[0] = (char) getInt(rnd, UnicodeUtil.UNI_SUR_HIGH_START, UnicodeUtil.UNI_SUR_HIGH_END);
assertTrue(((int) chars[0]) >= UnicodeUtil.UNI_SUR_HIGH_START && ((int) chars[0]) <= UnicodeUtil.UNI_SUR_HIGH_END);
chars[1] = (char) getInt(rnd, UnicodeUtil.UNI_SUR_LOW_START, UnicodeUtil.UNI_SUR_LOW_END);
s = new String(chars, 0, 2);
}
allTerms.add(s);
f.setStringValue(s);
writer.addDocument(d);
if ((1+i) % 42 == 0) {
writer.commit();
}
}
IndexReader r = writer.getReader();
// Test each sub-segment
for (AtomicReaderContext ctx : r.leaves()) {
checkTermsOrder(ctx.reader(), allTerms, false);
}
checkTermsOrder(r, allTerms, true);
// Test multi segment
r.close();
writer.forceMerge(1);
// Test single segment
r = writer.getReader();
checkTermsOrder(r, allTerms, true);
r.close();
writer.close();
dir.close();
}
|
public void testTermVectorExceptions() throws IOException {
FailOnTermVectors[] failures = new FailOnTermVectors[] {
new FailOnTermVectors(FailOnTermVectors.AFTER_INIT_STAGE),
new FailOnTermVectors(FailOnTermVectors.INIT_STAGE), };
int num = atLeast(1);
for (int j = 0; j < num; j++) {
for (FailOnTermVectors failure : failures) {
MockDirectoryWrapper dir = newMockDirectory();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer(random())));
dir.failOn(failure);
int numDocs = 10 + random().nextInt(30);
for (int i = 0; i < numDocs; i++) {
Document doc = new Document();
Field field = newTextField(random(), "field", "a field", Field.Store.YES);
doc.add(field);
// random TV
try {
w.addDocument(doc);
assertFalse(field.fieldType().storeTermVectors());
} catch (RuntimeException e) {
assertTrue(e.getMessage().startsWith(FailOnTermVectors.EXC_MSG));
}
if (random().nextInt(20) == 0) {
w.commit();
_TestUtil.checkIndex(dir);
}
}
Document document = new Document();
document.add(new TextField("field", "a field", Field.Store.YES));
w.addDocument(document);
for (int i = 0; i < numDocs; i++) {
Document doc = new Document();
Field field = newTextField(random(), "field", "a field", Field.Store.YES);
doc.add(field);
// random TV
try {
w.addDocument(doc);
assertFalse(field.fieldType().storeTermVectors());
} catch (RuntimeException e) {
assertTrue(e.getMessage().startsWith(FailOnTermVectors.EXC_MSG));
}
if (random().nextInt(20) == 0) {
w.commit();
_TestUtil.checkIndex(dir);
}
}
document = new Document();
document.add(new TextField("field", "a field", Field.Store.YES));
w.addDocument(document);
w.close();
IndexReader reader = DirectoryReader.open(dir);
assertTrue(reader.numDocs() > 0);
SegmentInfos sis = new SegmentInfos();
sis.read(dir);
for(AtomicReaderContext context : reader.getTopReaderContext().leaves()) {
assertFalse(context.reader().getFieldInfos().hasVectors());
}
reader.close();
dir.close();
}
}
| public void testTermVectorExceptions() throws IOException {
FailOnTermVectors[] failures = new FailOnTermVectors[] {
new FailOnTermVectors(FailOnTermVectors.AFTER_INIT_STAGE),
new FailOnTermVectors(FailOnTermVectors.INIT_STAGE), };
int num = atLeast(1);
for (int j = 0; j < num; j++) {
for (FailOnTermVectors failure : failures) {
MockDirectoryWrapper dir = newMockDirectory();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer(random())));
dir.failOn(failure);
int numDocs = 10 + random().nextInt(30);
for (int i = 0; i < numDocs; i++) {
Document doc = new Document();
Field field = newTextField(random(), "field", "a field", Field.Store.YES);
doc.add(field);
// random TV
try {
w.addDocument(doc);
assertFalse(field.fieldType().storeTermVectors());
} catch (RuntimeException e) {
assertTrue(e.getMessage().startsWith(FailOnTermVectors.EXC_MSG));
}
if (random().nextInt(20) == 0) {
w.commit();
_TestUtil.checkIndex(dir);
}
}
Document document = new Document();
document.add(new TextField("field", "a field", Field.Store.YES));
w.addDocument(document);
for (int i = 0; i < numDocs; i++) {
Document doc = new Document();
Field field = newTextField(random(), "field", "a field", Field.Store.YES);
doc.add(field);
// random TV
try {
w.addDocument(doc);
assertFalse(field.fieldType().storeTermVectors());
} catch (RuntimeException e) {
assertTrue(e.getMessage().startsWith(FailOnTermVectors.EXC_MSG));
}
if (random().nextInt(20) == 0) {
w.commit();
_TestUtil.checkIndex(dir);
}
}
document = new Document();
document.add(new TextField("field", "a field", Field.Store.YES));
w.addDocument(document);
w.close();
IndexReader reader = DirectoryReader.open(dir);
assertTrue(reader.numDocs() > 0);
SegmentInfos sis = new SegmentInfos();
sis.read(dir);
for(AtomicReaderContext context : reader.leaves()) {
assertFalse(context.reader().getFieldInfos().hasVectors());
}
reader.close();
dir.close();
}
}
|
public synchronized boolean tryDeleteDocument(IndexReader readerIn, int docID) throws IOException {
final AtomicReader reader;
if (readerIn instanceof AtomicReader) {
// Reader is already atomic: use the incoming docID:
reader = (AtomicReader) readerIn;
} else {
// Composite reader: lookup sub-reader and re-base docID:
List<AtomicReaderContext> leaves = readerIn.getTopReaderContext().leaves();
int subIndex = ReaderUtil.subIndex(docID, leaves);
reader = leaves.get(subIndex).reader();
docID -= leaves.get(subIndex).docBase;
assert docID >= 0;
assert docID < reader.maxDoc();
}
if (!(reader instanceof SegmentReader)) {
throw new IllegalArgumentException("the reader must be a SegmentReader or composite reader containing only SegmentReaders");
}
final SegmentInfoPerCommit info = ((SegmentReader) reader).getSegmentInfo();
// TODO: this is a slow linear search, but, number of
// segments should be contained unless something is
// seriously wrong w/ the index, so it should be a minor
// cost:
if (segmentInfos.indexOf(info) != -1) {
ReadersAndLiveDocs rld = readerPool.get(info, false);
if (rld != null) {
synchronized(bufferedDeletesStream) {
rld.initWritableLiveDocs();
if (rld.delete(docID)) {
final int fullDelCount = rld.info.getDelCount() + rld.getPendingDeleteCount();
if (fullDelCount == rld.info.info.getDocCount()) {
// If a merge has already registered for this
// segment, we leave it in the readerPool; the
// merge will skip merging it and will then drop
// it once it's done:
if (!mergingSegments.contains(rld.info)) {
segmentInfos.remove(rld.info);
readerPool.drop(rld.info);
checkpoint();
}
}
}
//System.out.println(" yes " + info.info.name + " " + docID);
return true;
}
} else {
//System.out.println(" no rld " + info.info.name + " " + docID);
}
} else {
//System.out.println(" no seg " + info.info.name + " " + docID);
}
return false;
}
| public synchronized boolean tryDeleteDocument(IndexReader readerIn, int docID) throws IOException {
final AtomicReader reader;
if (readerIn instanceof AtomicReader) {
// Reader is already atomic: use the incoming docID:
reader = (AtomicReader) readerIn;
} else {
// Composite reader: lookup sub-reader and re-base docID:
List<AtomicReaderContext> leaves = readerIn.leaves();
int subIndex = ReaderUtil.subIndex(docID, leaves);
reader = leaves.get(subIndex).reader();
docID -= leaves.get(subIndex).docBase;
assert docID >= 0;
assert docID < reader.maxDoc();
}
if (!(reader instanceof SegmentReader)) {
throw new IllegalArgumentException("the reader must be a SegmentReader or composite reader containing only SegmentReaders");
}
final SegmentInfoPerCommit info = ((SegmentReader) reader).getSegmentInfo();
// TODO: this is a slow linear search, but, number of
// segments should be contained unless something is
// seriously wrong w/ the index, so it should be a minor
// cost:
if (segmentInfos.indexOf(info) != -1) {
ReadersAndLiveDocs rld = readerPool.get(info, false);
if (rld != null) {
synchronized(bufferedDeletesStream) {
rld.initWritableLiveDocs();
if (rld.delete(docID)) {
final int fullDelCount = rld.info.getDelCount() + rld.getPendingDeleteCount();
if (fullDelCount == rld.info.info.getDocCount()) {
// If a merge has already registered for this
// segment, we leave it in the readerPool; the
// merge will skip merging it and will then drop
// it once it's done:
if (!mergingSegments.contains(rld.info)) {
segmentInfos.remove(rld.info);
readerPool.drop(rld.info);
checkpoint();
}
}
}
//System.out.println(" yes " + info.info.name + " " + docID);
return true;
}
} else {
//System.out.println(" no rld " + info.info.name + " " + docID);
}
} else {
//System.out.println(" no seg " + info.info.name + " " + docID);
}
return false;
}
|
private final FieldInfos.Builder fieldInfosBuilder;
// note, just like in codec apis Directory 'dir' is NOT the same as segmentInfo.dir!!
SegmentMerger(SegmentInfo segmentInfo, InfoStream infoStream, Directory dir, int termIndexInterval,
MergeState.CheckAbort checkAbort, PayloadProcessorProvider payloadProcessorProvider,
FieldInfos.FieldNumbers fieldNumbers, IOContext context) {
mergeState.segmentInfo = segmentInfo;
mergeState.infoStream = infoStream;
mergeState.readers = new ArrayList<AtomicReader>();
mergeState.checkAbort = checkAbort;
mergeState.payloadProcessorProvider = payloadProcessorProvider;
directory = dir;
this.termIndexInterval = termIndexInterval;
this.codec = segmentInfo.getCodec();
this.context = context;
this.fieldInfosBuilder = new FieldInfos.Builder(fieldNumbers);
}
/**
* Add an IndexReader to the collection of readers that are to be merged
* @param reader
*/
final void add(IndexReader reader) {
for (final AtomicReaderContext ctx : reader.getTopReaderContext().leaves()) {
final AtomicReader r = ctx.reader();
mergeState.readers.add(r);
}
}
| private final FieldInfos.Builder fieldInfosBuilder;
// note, just like in codec apis Directory 'dir' is NOT the same as segmentInfo.dir!!
SegmentMerger(SegmentInfo segmentInfo, InfoStream infoStream, Directory dir, int termIndexInterval,
MergeState.CheckAbort checkAbort, PayloadProcessorProvider payloadProcessorProvider,
FieldInfos.FieldNumbers fieldNumbers, IOContext context) {
mergeState.segmentInfo = segmentInfo;
mergeState.infoStream = infoStream;
mergeState.readers = new ArrayList<AtomicReader>();
mergeState.checkAbort = checkAbort;
mergeState.payloadProcessorProvider = payloadProcessorProvider;
directory = dir;
this.termIndexInterval = termIndexInterval;
this.codec = segmentInfo.getCodec();
this.context = context;
this.fieldInfosBuilder = new FieldInfos.Builder(fieldNumbers);
}
/**
* Add an IndexReader to the collection of readers that are to be merged
* @param reader
*/
final void add(IndexReader reader) {
for (final AtomicReaderContext ctx : reader.leaves()) {
final AtomicReader r = ctx.reader();
mergeState.readers.add(r);
}
}
|
private static DocValues getDocValues(IndexReader reader, final String field, final DocValuesPuller puller) throws IOException {
if (reader instanceof AtomicReader) {
// already an atomic reader
return puller.pull((AtomicReader) reader, field);
}
assert reader instanceof CompositeReader;
final List<AtomicReaderContext> leaves = reader.getTopReaderContext().leaves();
switch (leaves.size()) {
case 0:
// no fields
return null;
case 1:
// already an atomic reader / reader with one leave
return getDocValues(leaves.get(0).reader(), field, puller);
default:
final List<DocValuesSlice> slices = new ArrayList<DocValuesSlice>();
TypePromoter promotedType = TypePromoter.getIdentityPromoter();
// gather all docvalues fields, accumulating a promoted type across
// potentially incompatible types
for (final AtomicReaderContext ctx : leaves) {
final AtomicReader r = ctx.reader();
final DocValues d = puller.pull(r, field);
if (d != null) {
TypePromoter incoming = TypePromoter.create(d.getType(), d.getValueSize());
promotedType = promotedType.promote(incoming);
} else if (puller.stopLoadingOnNull(r, field)){
return null;
}
slices.add(new DocValuesSlice(d, ctx.docBase, r.maxDoc()));
}
// return null if no docvalues encountered anywhere
if (promotedType == TypePromoter.getIdentityPromoter()) {
return null;
}
// populate starts and fill gaps with empty docvalues
int starts[] = new int[slices.size()];
for (int i = 0; i < slices.size(); i++) {
DocValuesSlice slice = slices.get(i);
starts[i] = slice.start;
if (slice.docValues == null) {
Type promoted = promotedType.type();
switch(promoted) {
case BYTES_FIXED_DEREF:
case BYTES_FIXED_STRAIGHT:
case BYTES_FIXED_SORTED:
assert promotedType.getValueSize() >= 0;
slice.docValues = new EmptyFixedDocValues(slice.length, promoted, promotedType.getValueSize());
break;
default:
slice.docValues = new EmptyDocValues(slice.length, promoted);
}
}
}
return new MultiDocValues(slices.toArray(new DocValuesSlice[slices.size()]), starts, promotedType);
}
}
| private static DocValues getDocValues(IndexReader reader, final String field, final DocValuesPuller puller) throws IOException {
if (reader instanceof AtomicReader) {
// already an atomic reader
return puller.pull((AtomicReader) reader, field);
}
assert reader instanceof CompositeReader;
final List<AtomicReaderContext> leaves = reader.leaves();
switch (leaves.size()) {
case 0:
// no fields
return null;
case 1:
// already an atomic reader / reader with one leave
return getDocValues(leaves.get(0).reader(), field, puller);
default:
final List<DocValuesSlice> slices = new ArrayList<DocValuesSlice>();
TypePromoter promotedType = TypePromoter.getIdentityPromoter();
// gather all docvalues fields, accumulating a promoted type across
// potentially incompatible types
for (final AtomicReaderContext ctx : leaves) {
final AtomicReader r = ctx.reader();
final DocValues d = puller.pull(r, field);
if (d != null) {
TypePromoter incoming = TypePromoter.create(d.getType(), d.getValueSize());
promotedType = promotedType.promote(incoming);
} else if (puller.stopLoadingOnNull(r, field)){
return null;
}
slices.add(new DocValuesSlice(d, ctx.docBase, r.maxDoc()));
}
// return null if no docvalues encountered anywhere
if (promotedType == TypePromoter.getIdentityPromoter()) {
return null;
}
// populate starts and fill gaps with empty docvalues
int starts[] = new int[slices.size()];
for (int i = 0; i < slices.size(); i++) {
DocValuesSlice slice = slices.get(i);
starts[i] = slice.start;
if (slice.docValues == null) {
Type promoted = promotedType.type();
switch(promoted) {
case BYTES_FIXED_DEREF:
case BYTES_FIXED_STRAIGHT:
case BYTES_FIXED_SORTED:
assert promotedType.getValueSize() >= 0;
slice.docValues = new EmptyFixedDocValues(slice.length, promoted, promotedType.getValueSize());
break;
default:
slice.docValues = new EmptyDocValues(slice.length, promoted);
}
}
}
return new MultiDocValues(slices.toArray(new DocValuesSlice[slices.size()]), starts, promotedType);
}
}
|
private void createIndex(IndexWriterConfig config, Directory target, IndexReader reader, Filter preserveFilter, boolean negateFilter) throws IOException {
boolean success = false;
final IndexWriter w = new IndexWriter(target, config);
try {
final List<AtomicReaderContext> leaves = reader.getTopReaderContext().leaves();
final IndexReader[] subReaders = new IndexReader[leaves.size()];
int i = 0;
for (final AtomicReaderContext ctx : leaves) {
subReaders[i++] = new DocumentFilteredAtomicIndexReader(ctx, preserveFilter, negateFilter);
}
w.addIndexes(subReaders);
success = true;
} finally {
if (success) {
IOUtils.close(w);
} else {
IOUtils.closeWhileHandlingException(w);
}
}
}
| private void createIndex(IndexWriterConfig config, Directory target, IndexReader reader, Filter preserveFilter, boolean negateFilter) throws IOException {
boolean success = false;
final IndexWriter w = new IndexWriter(target, config);
try {
final List<AtomicReaderContext> leaves = reader.leaves();
final IndexReader[] subReaders = new IndexReader[leaves.size()];
int i = 0;
for (final AtomicReaderContext ctx : leaves) {
subReaders[i++] = new DocumentFilteredAtomicIndexReader(ctx, preserveFilter, negateFilter);
}
w.addIndexes(subReaders);
success = true;
} finally {
if (success) {
IOUtils.close(w);
} else {
IOUtils.closeWhileHandlingException(w);
}
}
}
|
public static long getTotalTermFreq(IndexReader reader, final String field, final BytesRef termText) throws Exception {
long totalTF = 0L;
for (final AtomicReaderContext ctx : reader.getTopReaderContext().leaves()) {
AtomicReader r = ctx.reader();
Bits liveDocs = r.getLiveDocs();
if (liveDocs == null) {
// TODO: we could do this up front, during the scan
// (next()), instead of after-the-fact here w/ seek,
// if the codec supports it and there are no del
// docs...
final long totTF = r.totalTermFreq(field, termText);
if (totTF != -1) {
totalTF += totTF;
continue;
} // otherwise we fall-through
}
// note: what should we do if field omits freqs? currently it counts as 1...
DocsEnum de = r.termDocsEnum(liveDocs, field, termText);
if (de != null) {
while (de.nextDoc() != DocIdSetIterator.NO_MORE_DOCS)
totalTF += de.freq();
}
}
return totalTF;
}
| public static long getTotalTermFreq(IndexReader reader, final String field, final BytesRef termText) throws Exception {
long totalTF = 0L;
for (final AtomicReaderContext ctx : reader.leaves()) {
AtomicReader r = ctx.reader();
Bits liveDocs = r.getLiveDocs();
if (liveDocs == null) {
// TODO: we could do this up front, during the scan
// (next()), instead of after-the-fact here w/ seek,
// if the codec supports it and there are no del
// docs...
final long totTF = r.totalTermFreq(field, termText);
if (totTF != -1) {
totalTF += totTF;
continue;
} // otherwise we fall-through
}
// note: what should we do if field omits freqs? currently it counts as 1...
DocsEnum de = r.termDocsEnum(liveDocs, field, termText);
if (de != null) {
while (de.nextDoc() != DocIdSetIterator.NO_MORE_DOCS)
totalTF += de.freq();
}
}
return totalTF;
}
|
public void setContext( TransformContext context ) {
try {
IndexReader reader = qparser.getReq().getSearcher().getIndexReader();
readerContexts = reader.getTopReaderContext().leaves();
docValuesArr = new FunctionValues[readerContexts.size()];
searcher = qparser.getReq().getSearcher();
fcontext = ValueSource.newContext(searcher);
this.valueSource.createWeight(fcontext, searcher);
} catch (IOException e) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
}
}
| public void setContext( TransformContext context ) {
try {
IndexReader reader = qparser.getReq().getSearcher().getIndexReader();
readerContexts = reader.leaves();
docValuesArr = new FunctionValues[readerContexts.size()];
searcher = qparser.getReq().getSearcher();
fcontext = ValueSource.newContext(searcher);
this.valueSource.createWeight(fcontext, searcher);
} catch (IOException e) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
}
}
|
public static SimpleOrderedMap<Object> getIndexInfo(DirectoryReader reader) throws IOException {
Directory dir = reader.directory();
SimpleOrderedMap<Object> indexInfo = new SimpleOrderedMap<Object>();
indexInfo.add("numDocs", reader.numDocs());
indexInfo.add("maxDoc", reader.maxDoc());
indexInfo.add("version", reader.getVersion()); // TODO? Is this different then: IndexReader.getCurrentVersion( dir )?
indexInfo.add("segmentCount", reader.getTopReaderContext().leaves().size());
indexInfo.add("current", reader.isCurrent() );
indexInfo.add("hasDeletions", reader.hasDeletions() );
indexInfo.add("directory", dir );
indexInfo.add("userData", reader.getIndexCommit().getUserData());
String s = reader.getIndexCommit().getUserData().get(SolrIndexWriter.COMMIT_TIME_MSEC_KEY);
if (s != null) {
indexInfo.add("lastModified", new Date(Long.parseLong(s)));
}
return indexInfo;
}
| public static SimpleOrderedMap<Object> getIndexInfo(DirectoryReader reader) throws IOException {
Directory dir = reader.directory();
SimpleOrderedMap<Object> indexInfo = new SimpleOrderedMap<Object>();
indexInfo.add("numDocs", reader.numDocs());
indexInfo.add("maxDoc", reader.maxDoc());
indexInfo.add("version", reader.getVersion()); // TODO? Is this different then: IndexReader.getCurrentVersion( dir )?
indexInfo.add("segmentCount", reader.leaves().size());
indexInfo.add("current", reader.isCurrent() );
indexInfo.add("hasDeletions", reader.hasDeletions() );
indexInfo.add("directory", dir );
indexInfo.add("userData", reader.getIndexCommit().getUserData());
String s = reader.getIndexCommit().getUserData().get(SolrIndexWriter.COMMIT_TIME_MSEC_KEY);
if (s != null) {
indexInfo.add("lastModified", new Date(Long.parseLong(s)));
}
return indexInfo;
}
|
public static void enumeratekeys(String ssTableFile, PrintStream outs)
throws IOException
{
IPartitioner partitioner = StorageService.getPartitioner();
BufferedRandomAccessFile input = new BufferedRandomAccessFile(SSTable.indexFilename(ssTableFile), "r");
while (!input.isEOF())
{
DecoratedKey decoratedKey = partitioner.convertFromDiskFormat(input.readUTF());
long dataPosition = input.readLong();
outs.println(asStr(decoratedKey.key));
}
outs.flush();
}
| public static void enumeratekeys(String ssTableFile, PrintStream outs)
throws IOException
{
IPartitioner partitioner = StorageService.getPartitioner();
BufferedRandomAccessFile input = new BufferedRandomAccessFile(SSTable.indexFilename(ssTableFile), "r");
while (!input.isEOF())
{
DecoratedKey decoratedKey = partitioner.convertFromDiskFormat(FBUtilities.readShortByteArray(input));
long dataPosition = input.readLong();
outs.println(asStr(decoratedKey.key));
}
outs.flush();
}
|
private static Path prepareInput(FileSystem fs, List<?> population)
throws IOException {
Path inpath = new Path(fs.getWorkingDirectory(), "input");
// Delete the input if it already exists
if (fs.exists(inpath)) {
FileUtil.fullyDelete(fs, inpath);
}
fs.mkdirs(inpath);
storePopulation(fs, new Path(inpath, "population"), population);
return inpath;
}
| private static Path prepareInput(FileSystem fs, List<?> population)
throws IOException {
Path inpath = new Path(fs.getWorkingDirectory(), "input");
// Delete the input if it already exists
if (fs.exists(inpath)) {
fs.delete(inpath, true);
}
fs.mkdirs(inpath);
storePopulation(fs, new Path(inpath, "population"), population);
return inpath;
}
|
public static Path prepareOutput(FileSystem fs) throws IOException {
Path outpath = new Path(fs.getWorkingDirectory(), "output");
if (fs.exists(outpath)) {
FileUtil.fullyDelete(fs, outpath);
}
return outpath;
}
| public static Path prepareOutput(FileSystem fs) throws IOException {
Path outpath = new Path(fs.getWorkingDirectory(), "output");
if (fs.exists(outpath)) {
fs.delete(outpath, true);
}
return outpath;
}
|