Columns: buggy_function (string, lengths 1 to 391k), fixed_function (string, lengths 0 to 392k). Rows alternate: each buggy_function is followed by its corresponding fixed_function.
public void testBufferOverflow() throws Exception { StringBuilder testBuilder = new StringBuilder(HTMLStripCharFilter.DEFAULT_READ_AHEAD + 50); testBuilder.append("ah<?> "); appendChars(testBuilder, HTMLStripCharFilter.DEFAULT_READ_AHEAD + 500); processBuffer(testBuilder.toString(), "Failed on pseudo proc. instr.");//processing instructions testBuilder.setLength(0); testBuilder.append("<!--");//comments appendChars(testBuilder, 3*HTMLStripCharFilter.DEFAULT_READ_AHEAD + 500);//comments have two lookaheads testBuilder.append("-->foo"); processBuffer(testBuilder.toString(), "Failed w/ comment"); testBuilder.setLength(0); testBuilder.append("<?"); appendChars(testBuilder, HTMLStripCharFilter.DEFAULT_READ_AHEAD + 500); testBuilder.append("?>"); processBuffer(testBuilder.toString(), "Failed with proc. instr."); testBuilder.setLength(0); testBuilder.append("<b "); appendChars(testBuilder, HTMLStripCharFilter.DEFAULT_READ_AHEAD + 500); testBuilder.append("/>"); processBuffer(testBuilder.toString(), "Failed on tag"); }
public void testBufferOverflow() throws Exception { StringBuilder testBuilder = new StringBuilder(HTMLStripCharFilter.DEFAULT_READ_AHEAD + 50); testBuilder.append("ah<?> ??????"); appendChars(testBuilder, HTMLStripCharFilter.DEFAULT_READ_AHEAD + 500); processBuffer(testBuilder.toString(), "Failed on pseudo proc. instr.");//processing instructions testBuilder.setLength(0); testBuilder.append("<!--");//comments appendChars(testBuilder, 3*HTMLStripCharFilter.DEFAULT_READ_AHEAD + 500);//comments have two lookaheads testBuilder.append("-->foo"); processBuffer(testBuilder.toString(), "Failed w/ comment"); testBuilder.setLength(0); testBuilder.append("<?"); appendChars(testBuilder, HTMLStripCharFilter.DEFAULT_READ_AHEAD + 500); testBuilder.append("?>"); processBuffer(testBuilder.toString(), "Failed with proc. instr."); testBuilder.setLength(0); testBuilder.append("<b "); appendChars(testBuilder, HTMLStripCharFilter.DEFAULT_READ_AHEAD + 500); testBuilder.append("/>"); processBuffer(testBuilder.toString(), "Failed on tag"); }
public void testMissingSubcolumn() { SuperColumn sc = new SuperColumn("sc1".getBytes(), LongType.instance, ClockType.Timestamp, new TimestampReconciler()); sc.addColumn(new Column(getBytes(1), "value".getBytes(), new TimestampClock(1))); assertNotNull(sc.getSubColumn(getBytes(1))); assertNull(sc.getSubColumn(getBytes(2))); }
public void testMissingSubcolumn() { SuperColumn sc = new SuperColumn("sc1".getBytes(), LongType.instance, ClockType.Timestamp, TimestampReconciler.instance); sc.addColumn(new Column(getBytes(1), "value".getBytes(), new TimestampClock(1))); assertNotNull(sc.getSubColumn(getBytes(1))); assertNull(sc.getSubColumn(getBytes(2))); }
private ColumnFamilyStore(String table, String columnFamilyName, IPartitioner partitioner, int generation, CFMetaData metadata) { assert metadata != null : "null metadata for " + table + ":" + columnFamilyName; table_ = table; columnFamily_ = columnFamilyName; this.metadata = metadata; this.partitioner_ = partitioner; fileIndexGenerator_.set(generation); memtable_ = new Memtable(this, partitioner_); binaryMemtable_ = new AtomicReference<BinaryMemtable>(new BinaryMemtable(this)); if (logger_.isDebugEnabled()) logger_.debug("Starting CFS {}", columnFamily_); // scan for data files corresponding to this CF List<File> sstableFiles = new ArrayList<File>(); for (File file : files(table, columnFamilyName)) { if (file.getName().contains("-Data.db")) { sstableFiles.add(file.getAbsoluteFile()); } } Collections.sort(sstableFiles, new FileUtils.FileComparator()); /* Load the index files and the Bloom Filters associated with them. */ List<SSTableReader> sstables = new ArrayList<SSTableReader>(); for (File file : sstableFiles) { String filename = file.getAbsolutePath(); if (SSTable.deleteIfCompacted(filename)) continue; SSTableReader sstable; try { sstable = SSTableReader.open(Descriptor.fromFilename(filename), metadata, partitioner_); } catch (IOException ex) { logger_.error("Corrupt file " + filename + "; skipped", ex); continue; } sstables.add(sstable); } ssTables_ = new SSTableTracker(table, columnFamilyName); ssTables_.add(sstables); indexedColumns_ = new TreeMap<byte[], ColumnFamilyStore>(BytesType.instance); for (Map.Entry<byte[], ColumnDefinition> entry : metadata.column_metadata.entrySet()) { byte[] column = entry.getKey(); ColumnDefinition info = entry.getValue(); if (info.index_type == null) continue; String indexedCfName = columnFamily_ + "." + (info.index_name == null ? FBUtilities.bytesToHex(column) : info.index_name); IPartitioner rowPartitioner = StorageService.getPartitioner(); AbstractType columnComparator = (rowPartitioner instanceof OrderPreservingPartitioner || rowPartitioner instanceof ByteOrderedPartitioner) ? BytesType.instance : new LocalByPartionerType(StorageService.getPartitioner()); CFMetaData indexedCfMetadata = new CFMetaData(table, indexedCfName, ColumnFamilyType.Standard, ClockType.Timestamp, columnComparator, null, new TimestampReconciler(), "", 0, false, 0, 0, CFMetaData.DEFAULT_GC_GRACE_SECONDS, Collections.<byte[], ColumnDefinition>emptyMap()); ColumnFamilyStore indexedCfs = ColumnFamilyStore.createColumnFamilyStore(table, indexedCfName, new LocalPartitioner(metadata.column_metadata.get(column).validator), indexedCfMetadata); indexedColumns_.put(column, indexedCfs); } }
private ColumnFamilyStore(String table, String columnFamilyName, IPartitioner partitioner, int generation, CFMetaData metadata) { assert metadata != null : "null metadata for " + table + ":" + columnFamilyName; table_ = table; columnFamily_ = columnFamilyName; this.metadata = metadata; this.partitioner_ = partitioner; fileIndexGenerator_.set(generation); memtable_ = new Memtable(this, partitioner_); binaryMemtable_ = new AtomicReference<BinaryMemtable>(new BinaryMemtable(this)); if (logger_.isDebugEnabled()) logger_.debug("Starting CFS {}", columnFamily_); // scan for data files corresponding to this CF List<File> sstableFiles = new ArrayList<File>(); for (File file : files(table, columnFamilyName)) { if (file.getName().contains("-Data.db")) { sstableFiles.add(file.getAbsoluteFile()); } } Collections.sort(sstableFiles, new FileUtils.FileComparator()); /* Load the index files and the Bloom Filters associated with them. */ List<SSTableReader> sstables = new ArrayList<SSTableReader>(); for (File file : sstableFiles) { String filename = file.getAbsolutePath(); if (SSTable.deleteIfCompacted(filename)) continue; SSTableReader sstable; try { sstable = SSTableReader.open(Descriptor.fromFilename(filename), metadata, partitioner_); } catch (IOException ex) { logger_.error("Corrupt file " + filename + "; skipped", ex); continue; } sstables.add(sstable); } ssTables_ = new SSTableTracker(table, columnFamilyName); ssTables_.add(sstables); indexedColumns_ = new TreeMap<byte[], ColumnFamilyStore>(BytesType.instance); for (Map.Entry<byte[], ColumnDefinition> entry : metadata.column_metadata.entrySet()) { byte[] column = entry.getKey(); ColumnDefinition info = entry.getValue(); if (info.index_type == null) continue; String indexedCfName = columnFamily_ + "." + (info.index_name == null ? FBUtilities.bytesToHex(column) : info.index_name); IPartitioner rowPartitioner = StorageService.getPartitioner(); AbstractType columnComparator = (rowPartitioner instanceof OrderPreservingPartitioner || rowPartitioner instanceof ByteOrderedPartitioner) ? BytesType.instance : new LocalByPartionerType(StorageService.getPartitioner()); CFMetaData indexedCfMetadata = new CFMetaData(table, indexedCfName, ColumnFamilyType.Standard, ClockType.Timestamp, columnComparator, null, TimestampReconciler.instance, "", 0, false, 0, 0, CFMetaData.DEFAULT_GC_GRACE_SECONDS, Collections.<byte[], ColumnDefinition>emptyMap()); ColumnFamilyStore indexedCfs = ColumnFamilyStore.createColumnFamilyStore(table, indexedCfName, new LocalPartitioner(metadata.column_metadata.get(column).validator), indexedCfMetadata); indexedColumns_.put(column, indexedCfs); } }
private CFMetaData convertToCFMetaData(CfDef cf_def) throws InvalidRequestException, ConfigurationException { ColumnFamilyType cfType = ColumnFamilyType.create(cf_def.column_type); if (cfType == null) { throw new InvalidRequestException("Invalid column type " + cf_def.column_type); } ClockType clockType = ClockType.create(cf_def.clock_type); if (clockType == null) { throw new InvalidRequestException("Invalid clock type " + cf_def.clock_type); } AbstractReconciler reconciler = DatabaseDescriptor.getReconciler(cf_def.reconciler); if (reconciler == null) { if (clockType == ClockType.Timestamp) reconciler = new TimestampReconciler(); // default else throw new ConfigurationException("No reconciler specified for column family " + cf_def.name); } return new CFMetaData(cf_def.keyspace, cf_def.name, cfType, clockType, DatabaseDescriptor.getComparator(cf_def.comparator_type), cf_def.subcomparator_type.length() == 0 ? null : DatabaseDescriptor.getComparator(cf_def.subcomparator_type), reconciler, cf_def.comment, cf_def.row_cache_size, cf_def.preload_row_cache, cf_def.key_cache_size, cf_def.read_repair_chance, cf_def.isSetGc_grace_seconds() ? cf_def.gc_grace_seconds : CFMetaData.DEFAULT_GC_GRACE_SECONDS, ColumnDefinition.fromColumnDef(cf_def.column_metadata)); }
private CFMetaData convertToCFMetaData(CfDef cf_def) throws InvalidRequestException, ConfigurationException { ColumnFamilyType cfType = ColumnFamilyType.create(cf_def.column_type); if (cfType == null) { throw new InvalidRequestException("Invalid column type " + cf_def.column_type); } ClockType clockType = ClockType.create(cf_def.clock_type); if (clockType == null) { throw new InvalidRequestException("Invalid clock type " + cf_def.clock_type); } AbstractReconciler reconciler = DatabaseDescriptor.getReconciler(cf_def.reconciler); if (reconciler == null) { if (clockType == ClockType.Timestamp) reconciler = TimestampReconciler.instance; // default else throw new ConfigurationException("No reconciler specified for column family " + cf_def.name); } return new CFMetaData(cf_def.keyspace, cf_def.name, cfType, clockType, DatabaseDescriptor.getComparator(cf_def.comparator_type), cf_def.subcomparator_type.length() == 0 ? null : DatabaseDescriptor.getComparator(cf_def.subcomparator_type), reconciler, cf_def.comment, cf_def.row_cache_size, cf_def.preload_row_cache, cf_def.key_cache_size, cf_def.read_repair_chance, cf_def.isSetGc_grace_seconds() ? cf_def.gc_grace_seconds : CFMetaData.DEFAULT_GC_GRACE_SECONDS, ColumnDefinition.fromColumnDef(cf_def.column_metadata)); }
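The three fixes above replace per-call construction of a stateless reconciler with a shared singleton field. A minimal generic sketch of that pattern, using an illustrative class name rather than the actual Cassandra class:

    // Illustrative sketch only: a stateless helper exposed as a shared instance,
    // so call sites write Sketch.instance instead of new Sketch().
    public final class TimestampReconcilerSketch {
        public static final TimestampReconcilerSketch instance = new TimestampReconcilerSketch();

        private TimestampReconcilerSketch() {
            // no per-instance state, so one shared object serves every caller
        }
    }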
public List<ColumnOrSuperColumn> get_slice(String keyspace, String key, ColumnParent column_parent, SlicePredicate predicate, int consistency_level) throws InvalidRequestException, NotFoundException, UnavailableException { if (logger.isDebugEnabled()) logger.debug("get_slice"); return multigetSliceInternal(keyspace, Arrays.asList(key), column_parent, predicate, consistency_level).get(key); }
public List<ColumnOrSuperColumn> get_slice(String keyspace, String key, ColumnParent column_parent, SlicePredicate predicate, int consistency_level) throws InvalidRequestException, UnavailableException { if (logger.isDebugEnabled()) logger.debug("get_slice"); return multigetSliceInternal(keyspace, Arrays.asList(key), column_parent, predicate, consistency_level).get(key); }
public void clear() { Arrays.fill(this.state, 0, state.length - 1, FREE); distinct = 0; freeEntries = table.length; // delta trimToSize(); }
public void clear() { Arrays.fill(this.state, FREE); distinct = 0; freeEntries = table.length; // delta trimToSize(); }
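The clear() fix works because Arrays.fill(array, fromIndex, toIndex, value) treats toIndex as exclusive, so passing state.length - 1 left the last slot untouched, while the no-range overload covers the whole array. A minimal sketch of the difference, using an illustrative array size and fill value rather than the original FREE constant:

    import java.util.Arrays;

    public class FillRangeDemo {
        public static void main(String[] args) {
            byte[] state = new byte[4];
            // Buggy pattern: toIndex is exclusive, so index 3 keeps its old value.
            Arrays.fill(state, 0, state.length - 1, (byte) 1);
            System.out.println(Arrays.toString(state)); // [1, 1, 1, 0]
            // Fixed pattern: the no-range overload fills every element.
            Arrays.fill(state, (byte) 1);
            System.out.println(Arrays.toString(state)); // [1, 1, 1, 1]
        }
    }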
public BigDecimal getBigDecimal() { if (isNull()) return null; return new BigDecimal(value); }
public BigDecimal getBigDecimal() { if (isNull()) return null; return new BigDecimal(Double.toString(value)); }
public BigDecimal getBigDecimal() { if (isNull()) return null; return new BigDecimal(value); }
public BigDecimal getBigDecimal() { if (isNull()) return null; return new BigDecimal(Float.toString(value)); }
public BigDecimal getBigDecimal() throws StandardException { if (! isNull()) { if (value instanceof BigDecimal) return ((BigDecimal)value); if (value instanceof Number) return new BigDecimal(((Number) value).doubleValue()); } return super.getBigDecimal(); }
public BigDecimal getBigDecimal() throws StandardException { if (! isNull()) { if (value instanceof BigDecimal) return ((BigDecimal)value); if (value instanceof Number) return new BigDecimal(Double.toString(((Number) value).doubleValue())); } return super.getBigDecimal(); }
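The three getBigDecimal() fixes above all avoid the new BigDecimal(double) constructor, which captures the exact binary expansion of the floating-point value; routing through the decimal string (Double.toString / Float.toString) yields the value the caller actually sees. A minimal sketch with an illustrative value:

    import java.math.BigDecimal;

    public class BigDecimalConversionDemo {
        public static void main(String[] args) {
            double value = 0.1;
            // Buggy pattern: exact binary expansion of the double 0.1.
            System.out.println(new BigDecimal(value));
            // 0.1000000000000000055511151231257827021181583404541015625
            // Fixed pattern: convert via the decimal string representation.
            System.out.println(new BigDecimal(Double.toString(value))); // 0.1
        }
    }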
public Import(String inputFileName, String columnDelimiter, String characterDelimiter, String codeset, int noOfColumnsExpected, String columnTypes, boolean lobsInExtFile, int importCounter ) throws SQLException { try{ this.inputFileName = inputFileName; this.noOfColumnsExpected = noOfColumnsExpected; this.tableColumnTypesStr = columnTypes; controlFileReader = new ControlInfo(); controlFileReader.setControlProperties(characterDelimiter, columnDelimiter, codeset); this.lobsInExtFile = lobsInExtFile; _importers.put( new Integer( importCounter ), this ); doImport(); }catch(Exception e) { throw LoadError.unexpectedError(e); } }
public Import(String inputFileName, String columnDelimiter, String characterDelimiter, String codeset, int noOfColumnsExpected, String columnTypes, boolean lobsInExtFile, int importCounter ) throws SQLException { try{ this.inputFileName = inputFileName; this.noOfColumnsExpected = noOfColumnsExpected; this.tableColumnTypesStr = columnTypes; controlFileReader = new ControlInfo(); controlFileReader.setControlProperties(characterDelimiter, columnDelimiter, codeset); this.lobsInExtFile = lobsInExtFile; _importers.put( new Integer( importCounter ), this ); doImport(); }catch(Exception e) { throw importError(e); } }
private void fill() throws IOException { StringBuilder buffered = new StringBuilder(); char [] temp = new char [1024]; for (int cnt = in.read(temp); cnt > 0; cnt = in.read(temp)) { buffered.append(temp, 0, cnt); } transformedInput = new StringReader(processPattern(buffered).toString()); }
private void fill() throws IOException { StringBuilder buffered = new StringBuilder(); char [] temp = new char [1024]; for (int cnt = input.read(temp); cnt > 0; cnt = input.read(temp)) { buffered.append(temp, 0, cnt); } transformedInput = new StringReader(processPattern(buffered).toString()); }
private void prepareXATransaction(Xid xid) throws DRDAProtocolException { XAResource xaResource = getXAResource(); int xaRetVal = xaResource.XA_OK; try { xaResource.prepare(xid); if (SanityManager.DEBUG) { connThread.trace("prepared xa transaction: xaRetVal=" + xaRetVal); } } catch (XAException xe) { xaRetVal = processXAException(xe); } writeSYNCCRD(CodePoint.SYNCTYPE_PREPARE, xaRetVal, null); }
private void prepareXATransaction(Xid xid) throws DRDAProtocolException { XAResource xaResource = getXAResource(); int xaRetVal = xaResource.XA_OK; try { xaRetVal = xaResource.prepare(xid); if (SanityManager.DEBUG) { connThread.trace("prepared xa transaction: xaRetVal=" + xaRetVal); } } catch (XAException xe) { xaRetVal = processXAException(xe); } writeSYNCCRD(CodePoint.SYNCTYPE_PREPARE, xaRetVal, null); }
public int getTransactionTimeout() throws XAException { if (conn_.agent_.loggingEnabled()) { conn_.agent_.logWriter_.traceEntry(this, "getTransactionTimeout"); } exceptionsOnXA = null; if (conn_.isPhysicalConnClosed()) { connectionClosedFailure(); } if (conn_.agent_.loggingEnabled()) { conn_.agent_.logWriter_.traceExit(this, "getTransactionTimeout", 0); } return 0; // we don't support transaction timeout } /** * Ask the resource manager to prepare for a transaction commit of the transaction specified in xid. * * @param xid A global transaction identifier * * @return A value indicating the resource manager's vote on the outcome of the transaction. The possible values * are: XA_RDONLY or XA_OK. If the resource manager wants to roll back the transaction, it should do so by * raising an appropriate XAException in the prepare method. * * @throws XAException An error has occurred. Possible exception values are: XA_RB*, XAER_RMERR, XAER_RMFAIL, * XAER_NOTA, XAER_INVAL, or XAER_PROTO. */ public int prepare(Xid xid) throws XAException { // public interface for prepare // just call prepareX with the recursion flag set to true exceptionsOnXA = null; if (conn_.agent_.loggingEnabled()) { conn_.agent_.logWriter_.traceEntry(this, "prepare", xid); } if (conn_.isPhysicalConnClosed()) { connectionClosedFailure(); } /// update the XACallInfo NetAgent netAgent = conn_.netAgent_; int rc = XAResource.XA_OK; NetXACallInfo callInfo = callInfoArray_[conn_.currXACallInfoOffset_]; callInfo.xid_ = xid; callInfo.xaResource_ = this; callInfo.xaRetVal_ = XARETVAL_XAOK; // initialize XARETVAL try { netAgent.beginWriteChainOutsideUOW(); // sent the prepare PROTOCOL netAgent.netConnectionRequest_.writeXaPrepare(conn_); netAgent.flowOutsideUOW(); // read the reply to the prepare rc = netAgent.netConnectionReply_.readXaPrepare(conn_); if ((callInfo.xaRetVal_ != XARETVAL_XAOK) && (callInfo.xaRetVal_ != XARETVAL_XARDONLY)) { // xaRetVal has possible error, format it callInfo.xaFunction_ = XAFUNC_PREPARE; rc = xaRetValErrorAccumSQL(callInfo, rc); callInfo.xaRetVal_ = XARETVAL_XAOK; // re-initialize XARETVAL } netAgent.endReadChain(); } catch (SqlException sqle) { rc = XAException.XAER_RMERR; exceptionsOnXA = org.apache.derby.client.am.Utils.accumulateSQLException (sqle, exceptionsOnXA); } finally { conn_.pendingEndXACallinfoOffset_ = -1; // indicate no pending callinfo } if (rc != XAResource.XA_OK) { throwXAException(rc, false); } if (conn_.agent_.loggingEnabled()) { conn_.agent_.logWriter_.traceExit(this, "prepare", rc); } return rc; }
public int getTransactionTimeout() throws XAException { if (conn_.agent_.loggingEnabled()) { conn_.agent_.logWriter_.traceEntry(this, "getTransactionTimeout"); } exceptionsOnXA = null; if (conn_.isPhysicalConnClosed()) { connectionClosedFailure(); } if (conn_.agent_.loggingEnabled()) { conn_.agent_.logWriter_.traceExit(this, "getTransactionTimeout", 0); } return 0; // we don't support transaction timeout } /** * Ask the resource manager to prepare for a transaction commit of the transaction specified in xid. * * @param xid A global transaction identifier * * @return A value indicating the resource manager's vote on the outcome of the transaction. The possible values * are: XA_RDONLY or XA_OK. If the resource manager wants to roll back the transaction, it should do so by * raising an appropriate XAException in the prepare method. * * @throws XAException An error has occurred. Possible exception values are: XA_RB*, XAER_RMERR, XAER_RMFAIL, * XAER_NOTA, XAER_INVAL, or XAER_PROTO. */ public int prepare(Xid xid) throws XAException { // public interface for prepare // just call prepareX with the recursion flag set to true exceptionsOnXA = null; if (conn_.agent_.loggingEnabled()) { conn_.agent_.logWriter_.traceEntry(this, "prepare", xid); } if (conn_.isPhysicalConnClosed()) { connectionClosedFailure(); } /// update the XACallInfo NetAgent netAgent = conn_.netAgent_; int rc = XAResource.XA_OK; NetXACallInfo callInfo = callInfoArray_[conn_.currXACallInfoOffset_]; callInfo.xid_ = xid; callInfo.xaResource_ = this; callInfo.xaRetVal_ = XARETVAL_XAOK; // initialize XARETVAL try { netAgent.beginWriteChainOutsideUOW(); // sent the prepare PROTOCOL netAgent.netConnectionRequest_.writeXaPrepare(conn_); netAgent.flowOutsideUOW(); // read the reply to the prepare rc = netAgent.netConnectionReply_.readXaPrepare(conn_); if ((callInfo.xaRetVal_ != XARETVAL_XAOK) && (callInfo.xaRetVal_ != XARETVAL_XARDONLY)) { // xaRetVal has possible error, format it callInfo.xaFunction_ = XAFUNC_PREPARE; rc = xaRetValErrorAccumSQL(callInfo, rc); callInfo.xaRetVal_ = XARETVAL_XAOK; // re-initialize XARETVAL } netAgent.endReadChain(); } catch (SqlException sqle) { rc = XAException.XAER_RMERR; exceptionsOnXA = org.apache.derby.client.am.Utils.accumulateSQLException (sqle, exceptionsOnXA); } finally { conn_.pendingEndXACallinfoOffset_ = -1; // indicate no pending callinfo } if ((rc != XAResource.XA_OK ) && (rc != XAResource.XA_RDONLY)) { throwXAException(rc, false); } if (conn_.agent_.loggingEnabled()) { conn_.agent_.logWriter_.traceExit(this, "prepare", rc); } return rc; }
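Both XA fixes above hinge on the same contract: XAResource.prepare(Xid) reports the resource manager's vote through its int return value, and XA_RDONLY is a successful outcome rather than an error, so the vote must be captured and XA_RDONLY must not trigger the failure path. A minimal sketch of capturing the vote; the XAResource and Xid are assumed to be supplied by the caller:

    import javax.transaction.xa.XAException;
    import javax.transaction.xa.XAResource;
    import javax.transaction.xa.Xid;

    public class PrepareVoteSketch {
        // Returns the actual vote (XA_OK or XA_RDONLY) instead of assuming XA_OK.
        static int prepareVote(XAResource xaResource, Xid xid) {
            int xaRetVal;
            try {
                xaRetVal = xaResource.prepare(xid); // XA_OK (0) or XA_RDONLY (3)
            } catch (XAException xe) {
                xaRetVal = xe.errorCode;            // e.g. XA_RB*, XAER_*
            }
            return xaRetVal;
        }
    }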
public static boolean getAttribute(Element element, String attributeName, boolean deflt) { String result = element.getAttribute(attributeName); if ((result == null) || ("".equals(result))) { return deflt; } return Boolean.getBoolean(result); }
public static boolean getAttribute(Element element, String attributeName, boolean deflt) { String result = element.getAttribute(attributeName); if ((result == null) || ("".equals(result))) { return deflt; } return Boolean.valueOf(result).booleanValue(); }
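The getAttribute() fix is needed because Boolean.getBoolean(String) does not parse its argument at all; it looks up a JVM system property with that name, so an attribute value such as "true" almost always comes back false. A minimal sketch of the difference, with an illustrative attribute value:

    public class BooleanParseDemo {
        public static void main(String[] args) {
            String result = "true"; // illustrative attribute value
            // Buggy pattern: checks for a system property literally named "true".
            System.out.println(Boolean.getBoolean(result));             // false
            // Fixed pattern: parses the string itself.
            System.out.println(Boolean.valueOf(result).booleanValue()); // true
        }
    }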
public void emitPoint(Vector point, OutputCollector<Text, Text> collector) throws IOException { collector.collect(new Text(formatCanopy(this)), new Text(point .asFormatString())); }
public void emitPoint(Vector point, OutputCollector<Text, Text> collector) throws IOException { collector.collect(new Text(this.getIdentifier()), new Text(point .asFormatString())); }
protected FromBaseTable getResultColumnList(ResultColumnList inputRcl) throws StandardException { /* Get a ResultColumnList representing all the columns in the target */ FromBaseTable fbt = (FromBaseTable) (getNodeFactory().getNode( C_NodeTypes.FROM_BASE_TABLE, targetTableName, null, null, null, getContextManager()) ); fbt.bindNonVTITables( getDataDictionary(), (FromList) getNodeFactory().getNode( C_NodeTypes.FROM_LIST, getNodeFactory().doJoinOrderOptimization(), getContextManager())); getResultColumnList( fbt, inputRcl ); return fbt; }
protected FromBaseTable getResultColumnList(ResultColumnList inputRcl) throws StandardException { /* Get a ResultColumnList representing all the columns in the target */ FromBaseTable fbt = (FromBaseTable) (getNodeFactory().getNode( C_NodeTypes.FROM_BASE_TABLE, synonymTableName != null ? synonymTableName : targetTableName, null, null, null, getContextManager()) ); fbt.bindNonVTITables( getDataDictionary(), (FromList) getNodeFactory().getNode( C_NodeTypes.FROM_LIST, getNodeFactory().doJoinOrderOptimization(), getContextManager())); getResultColumnList( fbt, inputRcl ); return fbt; }
public static void main(String[] args) throws SQLException, IOException, InterruptedException, Exception, Throwable { Connection conn = null; if (args.length == 1) { driver_type = args[0]; if (!((driver_type.equalsIgnoreCase("DerbyClient")) || (driver_type .equalsIgnoreCase("Embedded")))) { printUsage(); return; } System.out.println("Test nstest starting....., using driver: " + driver_type); } else { driver_type = "DerbyClient"; } // Load the driver and get a connection to the database String jdbcUrl = ""; try { if (driver_type.equalsIgnoreCase("Embedded")) { // System.out.println("Driver embedd : " + driver_type); System.out.println("Loading the embedded driver..."); Class.forName(embedDriver).newInstance(); jdbcUrl = embedDbURL + ";" + dataEncypt + ";" + bootPwd; embeddedMode = true; } else { System.out.println("Driver type : " + driver_type); System.out.println("Loading the Derby Client driver..." + driver); Class.forName(driver).newInstance(); System.out.println("Client Driver loaded"); jdbcUrl = clientDbURL + ";" + dataEncypt + ";" + bootPwd; } if ((!embeddedMode) && START_SERVER_IN_SAME_VM) { startNetworkServer(); } prop.setProperty("user", user); prop.setProperty("password", password); System.out .println("Getting a connection using the url: " + jdbcUrl); System.out.println("JDBC url= " + jdbcUrl); conn = DriverManager.getConnection(jdbcUrl, prop); } catch (SQLException sqe) { System.out.println("\n\n " + sqe + sqe.getErrorCode() + " " + sqe.getSQLState()); if ((sqe.getErrorCode() == -4499) || sqe.getSQLState().equalsIgnoreCase("08001")) { System.out .println("\n Unable to connect, test cannot proceed. Please verify if the Network Server is started on port 1900."); // sqe.printStackTrace(); return; } } catch (ClassNotFoundException cnfe) { System.out.println("Driver not found: " + cnfe.getMessage()); cnfe.printStackTrace(); return; } catch (Exception e) { e.printStackTrace(); System.out.println("Unexpected Failure"); printException("nstest.main() method ==> ", e); } // create test schema if it does not already exist if (DbSetup.doIt(conn) == false) { System.out.println("Error in dbSetup, test will exit"); System.exit(1); } // Note that the connection is still open, we can safely close it now try { conn.close(); } catch (Exception e) { System.out .println("FAIL - Error closing the connection in nstest.main():"); printException("Closing connection in nstest.main()", e); } // check memory in separate thread-- allows us to monitor usage during // database calls // 200,000 msec = 3min, 20 sec delay between checks System.out.println("Starting memory checker thread"); MemCheck mc = new MemCheck(200000); mc.start(); // Now populate the tables using INIT_THREADS number of threads only if // the schemaCreated // flag has not been set. If so, then we assume that some other thread // from possibly // another jvm reached here and has already created the schema and // loaded the tables. // Note that we kick off threads of this object type (nstest) and use // the run method to // do the work. The key to starting the init threads is the use of the // constructor // to indicate to the thread that it is an init thread. In this case, we // pass the // value INIT to the constructor and in the run method we go to the // right section of the // code based on what value is passed in. The other possible value that // a thread can get // is TESTER which indicates that these are the main test threads. 
if (NsTest.schemaCreated == false) { // Table was created by this object, so we need to load it System.out .println("Kicking off initialization threads that will populate the test table"); NsTest initThreads[] = new NsTest[INIT_THREADS]; for (int i = 0; i < INIT_THREADS; i++) { initThreads[i] = new NsTest(INIT, i); initThreads[i].start(); sleep(3000); } // Wait for the init threads to finish and join back for (int i = 0; i < INIT_THREADS; i++) { initThreads[i].join(); } }// end of if(nstest.schemaCreated==false) // For informational/debug purposes, print out whether this process // created the schema if (NsTest.schemaCreated) // true means that the schema was created by // another jvm System.out .println("Schema has already been created by another process!"); // The following 2 lines are used when you want to only create the test // database that can be // used as a reference so that subsequent tests do not need to create // one of their own. // The CREATE_DATABASE_ONLY FLAG is set with the rest of the flags if (CREATE_DATABASE_ONLY) { System.out .println("Finished creating the database, TEST THREADS WILL NOT RUN!!"); // Finally also stop the memory checker thread, else the test will // remain hung! mc.stopNow = true; mc.join(); return; } // Table was created by some other object, so we assume it is already // loaded // Now kick off the actual test threads that will do the work for us. // Note that we use // the value TESTER when initializing the threads. // The total number of threads is NUMTESTER1+NUMTESTER2+NUMTESTER3 System.out .println("Kicking off test threads that will work over the test table"); int numTestThread = 0; int maxTestThreads = 1 + NUMTESTER1 + NUMTESTER2 + NUMTESTER3; NsTest testThreads[] = new NsTest[maxTestThreads]; // This loop is made of 3 subloops that will initialize the required // amount of tester threads // It uses the numTestThread variable as the array index which gets // incremented in each subloop while (numTestThread < maxTestThreads) { String runBackup = System.getProperty("derby.nstest.backupRestore"); // Check for property setting to decide the need for starting // BackupRestore thread if ((runBackup != null) && (runBackup.equalsIgnoreCase("false"))) { System.out.println("BackupRestore Thread not started..."); } else { // Otherwise, start the BackupRestore Thread by default testThreads[numTestThread] = new NsTest(BACKUP, numTestThread); testThreads[numTestThread].start(); numTestThread++; } for (int j = 0; j < NUMTESTER1; j++) { testThreads[numTestThread] = new NsTest(TESTER1, numTestThread); testThreads[numTestThread].start(); sleep(3000); numTestThread++; } for (int j = 0; j < NUMTESTER2; j++) { testThreads[numTestThread] = new NsTest(TESTER2, numTestThread); testThreads[numTestThread].start(); sleep(3000); numTestThread++; } for (int j = 0; j < NUMTESTER3; j++) { testThreads[numTestThread] = new NsTest(TESTER3, numTestThread); testThreads[numTestThread].start(); sleep(3000); numTestThread++; } } // Wait for the init threads to finish and join back for (int j = 0; j < maxTestThreads; j++) { System.out.println("Waiting for thread " + j + " to join back/finish"); testThreads[j].join(); } // Print statistics System.out.println(""); System.out.println("STATISTICS OF OPERATIONS DONE"); System.out.println("-----------------------------"); System.out.println(""); System.out.println("SUCCESSFUL: "); System.out.println(" Number of INSERTS = " + numInserts); System.out.println(" Number of UPDATES = " + numUpdates); System.out.println(" Number of DELETES = " + 
numDeletes); System.out.println(" Number of SELECTS = " + numSelects); System.out.println(""); System.out.println("FAILED: "); System.out.println(" Number of failed INSERTS = " + numFailedInserts); System.out.println(" Number of failed UPDATES = " + numFailedUpdates); System.out.println(" Number of failed DELETES = " + numFailedDeletes); System.out.println(" Number of failed SELECTS = " + numFailedSelects); System.out.println(""); System.out.println(" Note that this may not be the same as the server side connections made " + " to the database especially if connection pooling is employed"); System.out.println(""); System.out .println("NOTE: Failing operations could be because of locking issue that are " + "directly related to the application logic. They are not necessarily bugs."); // Finally also stop the memory checker thread mc.stopNow = true; mc.join(); System.out .println("End of test nstest! Look for 'FAIL' messages in the output and derby.log"); }// end of main
public static void main(String[] args) throws SQLException, IOException, InterruptedException, Exception, Throwable { Connection conn = null; if (args.length == 1) { driver_type = args[0]; if (!((driver_type.equalsIgnoreCase("DerbyClient")) || (driver_type .equalsIgnoreCase("Embedded")))) { printUsage(); return; } System.out.println("Test nstest starting....., using driver: " + driver_type); } else { driver_type = "DerbyClient"; } // Load the driver and get a connection to the database String jdbcUrl = ""; try { if (driver_type.equalsIgnoreCase("Embedded")) { // System.out.println("Driver embedd : " + driver_type); System.out.println("Loading the embedded driver..."); Class.forName(embedDriver).newInstance(); jdbcUrl = embedDbURL + ";" + dataEncypt + ";" + bootPwd; embeddedMode = true; } else { System.out.println("Driver type : " + driver_type); System.out.println("Loading the Derby Client driver..." + driver); Class.forName(driver).newInstance(); System.out.println("Client Driver loaded"); jdbcUrl = clientDbURL + ";" + dataEncypt + ";" + bootPwd; } if ((!embeddedMode) && START_SERVER_IN_SAME_VM) { startNetworkServer(); } prop.setProperty("user", user); prop.setProperty("password", password); System.out .println("Getting a connection using the url: " + jdbcUrl); System.out.println("JDBC url= " + jdbcUrl); conn = DriverManager.getConnection(jdbcUrl, prop); } catch (SQLException sqe) { System.out.println("\n\n " + sqe + sqe.getErrorCode() + " " + sqe.getSQLState()); if ((sqe.getErrorCode() == 40000) || sqe.getSQLState().equalsIgnoreCase("08001")) { System.out .println("\n Unable to connect, test cannot proceed. Please verify if the Network Server is started on port 1900."); // sqe.printStackTrace(); return; } } catch (ClassNotFoundException cnfe) { System.out.println("Driver not found: " + cnfe.getMessage()); cnfe.printStackTrace(); return; } catch (Exception e) { e.printStackTrace(); System.out.println("Unexpected Failure"); printException("nstest.main() method ==> ", e); } // create test schema if it does not already exist if (DbSetup.doIt(conn) == false) { System.out.println("Error in dbSetup, test will exit"); System.exit(1); } // Note that the connection is still open, we can safely close it now try { conn.close(); } catch (Exception e) { System.out .println("FAIL - Error closing the connection in nstest.main():"); printException("Closing connection in nstest.main()", e); } // check memory in separate thread-- allows us to monitor usage during // database calls // 200,000 msec = 3min, 20 sec delay between checks System.out.println("Starting memory checker thread"); MemCheck mc = new MemCheck(200000); mc.start(); // Now populate the tables using INIT_THREADS number of threads only if // the schemaCreated // flag has not been set. If so, then we assume that some other thread // from possibly // another jvm reached here and has already created the schema and // loaded the tables. // Note that we kick off threads of this object type (nstest) and use // the run method to // do the work. The key to starting the init threads is the use of the // constructor // to indicate to the thread that it is an init thread. In this case, we // pass the // value INIT to the constructor and in the run method we go to the // right section of the // code based on what value is passed in. The other possible value that // a thread can get // is TESTER which indicates that these are the main test threads. 
if (NsTest.schemaCreated == false) { // Table was created by this object, so we need to load it System.out .println("Kicking off initialization threads that will populate the test table"); NsTest initThreads[] = new NsTest[INIT_THREADS]; for (int i = 0; i < INIT_THREADS; i++) { initThreads[i] = new NsTest(INIT, i); initThreads[i].start(); sleep(3000); } // Wait for the init threads to finish and join back for (int i = 0; i < INIT_THREADS; i++) { initThreads[i].join(); } }// end of if(nstest.schemaCreated==false) // For informational/debug purposes, print out whether this process // created the schema if (NsTest.schemaCreated) // true means that the schema was created by // another jvm System.out .println("Schema has already been created by another process!"); // The following 2 lines are used when you want to only create the test // database that can be // used as a reference so that subsequent tests do not need to create // one of their own. // The CREATE_DATABASE_ONLY FLAG is set with the rest of the flags if (CREATE_DATABASE_ONLY) { System.out .println("Finished creating the database, TEST THREADS WILL NOT RUN!!"); // Finally also stop the memory checker thread, else the test will // remain hung! mc.stopNow = true; mc.join(); return; } // Table was created by some other object, so we assume it is already // loaded // Now kick off the actual test threads that will do the work for us. // Note that we use // the value TESTER when initializing the threads. // The total number of threads is NUMTESTER1+NUMTESTER2+NUMTESTER3 System.out .println("Kicking off test threads that will work over the test table"); int numTestThread = 0; int maxTestThreads = 1 + NUMTESTER1 + NUMTESTER2 + NUMTESTER3; NsTest testThreads[] = new NsTest[maxTestThreads]; // This loop is made of 3 subloops that will initialize the required // amount of tester threads // It uses the numTestThread variable as the array index which gets // incremented in each subloop while (numTestThread < maxTestThreads) { String runBackup = System.getProperty("derby.nstest.backupRestore"); // Check for property setting to decide the need for starting // BackupRestore thread if ((runBackup != null) && (runBackup.equalsIgnoreCase("false"))) { System.out.println("BackupRestore Thread not started..."); } else { // Otherwise, start the BackupRestore Thread by default testThreads[numTestThread] = new NsTest(BACKUP, numTestThread); testThreads[numTestThread].start(); numTestThread++; } for (int j = 0; j < NUMTESTER1; j++) { testThreads[numTestThread] = new NsTest(TESTER1, numTestThread); testThreads[numTestThread].start(); sleep(3000); numTestThread++; } for (int j = 0; j < NUMTESTER2; j++) { testThreads[numTestThread] = new NsTest(TESTER2, numTestThread); testThreads[numTestThread].start(); sleep(3000); numTestThread++; } for (int j = 0; j < NUMTESTER3; j++) { testThreads[numTestThread] = new NsTest(TESTER3, numTestThread); testThreads[numTestThread].start(); sleep(3000); numTestThread++; } } // Wait for the init threads to finish and join back for (int j = 0; j < maxTestThreads; j++) { System.out.println("Waiting for thread " + j + " to join back/finish"); testThreads[j].join(); } // Print statistics System.out.println(""); System.out.println("STATISTICS OF OPERATIONS DONE"); System.out.println("-----------------------------"); System.out.println(""); System.out.println("SUCCESSFUL: "); System.out.println(" Number of INSERTS = " + numInserts); System.out.println(" Number of UPDATES = " + numUpdates); System.out.println(" Number of DELETES = " + 
numDeletes); System.out.println(" Number of SELECTS = " + numSelects); System.out.println(""); System.out.println("FAILED: "); System.out.println(" Number of failed INSERTS = " + numFailedInserts); System.out.println(" Number of failed UPDATES = " + numFailedUpdates); System.out.println(" Number of failed DELETES = " + numFailedDeletes); System.out.println(" Number of failed SELECTS = " + numFailedSelects); System.out.println(""); System.out.println(" Note that this may not be the same as the server side connections made " + " to the database especially if connection pooling is employed"); System.out.println(""); System.out .println("NOTE: Failing operations could be because of locking issue that are " + "directly related to the application logic. They are not necessarily bugs."); // Finally also stop the memory checker thread mc.stopNow = true; mc.join(); System.out .println("End of test nstest! Look for 'FAIL' messages in the output and derby.log"); }// end of main
private void insertRow(String key) throws IOException { RowMutation rm = new RowMutation("Keyspace1", key.getBytes()); ColumnFamily cf = ColumnFamily.create("Keyspace1", "Standard1"); cf.addColumn(column("col1", "val1", 1L)); rm.add(cf); rm.apply(); }
private void insertRow(String key) throws IOException { RowMutation rm = new RowMutation("Keyspace1", key.getBytes()); ColumnFamily cf = ColumnFamily.create("Keyspace1", "Standard1"); cf.addColumn(column("col1", "val1", new TimestampClock(1L))); rm.add(cf); rm.apply(); }
public void testGetColumn() throws IOException, ColumnFamilyNotDefinedException { Table table = Table.open("Keyspace1"); RowMutation rm; DecoratedKey dk = Util.dk("key1"); // add data rm = new RowMutation("Keyspace1", dk.key); rm.add(new QueryPath("Standard1", null, "Column1".getBytes()), "abcd".getBytes(), 0); rm.apply(); ReadCommand command = new SliceByNamesReadCommand("Keyspace1", dk.key, new QueryPath("Standard1"), Arrays.asList("Column1".getBytes())); Row row = command.getRow(table); IColumn col = row.cf.getColumn("Column1".getBytes()); assert Arrays.equals(col.value(), "abcd".getBytes()); }
public void testGetColumn() throws IOException, ColumnFamilyNotDefinedException { Table table = Table.open("Keyspace1"); RowMutation rm; DecoratedKey dk = Util.dk("key1"); // add data rm = new RowMutation("Keyspace1", dk.key); rm.add(new QueryPath("Standard1", null, "Column1".getBytes()), "abcd".getBytes(), new TimestampClock(0)); rm.apply(); ReadCommand command = new SliceByNamesReadCommand("Keyspace1", dk.key, new QueryPath("Standard1"), Arrays.asList("Column1".getBytes())); Row row = command.getRow(table); IColumn col = row.cf.getColumn("Column1".getBytes()); assert Arrays.equals(col.value(), "abcd".getBytes()); }
private void testCompaction(String columnFamilyName, int insertsPerTable) throws IOException, ExecutionException, InterruptedException { CompactionManager.instance.disableAutoCompaction(); Table table = Table.open("Keyspace1"); ColumnFamilyStore store = table.getColumnFamilyStore(columnFamilyName); Set<DecoratedKey> inserted = new HashSet<DecoratedKey>(); for (int j = 0; j < insertsPerTable; j++) { DecoratedKey key = Util.dk(String.valueOf(j)); RowMutation rm = new RowMutation("Keyspace1", key.key); rm.add(new QueryPath(columnFamilyName, null, "0".getBytes()), new byte[0], j); rm.apply(); inserted.add(key); store.forceBlockingFlush(); assertEquals(inserted.size(), Util.getRangeSlice(store).rows.size()); } CompactionManager.instance.submitMajor(store).get(); assertEquals(1, store.getSSTables().size()); }
private void testCompaction(String columnFamilyName, int insertsPerTable) throws IOException, ExecutionException, InterruptedException { CompactionManager.instance.disableAutoCompaction(); Table table = Table.open("Keyspace1"); ColumnFamilyStore store = table.getColumnFamilyStore(columnFamilyName); Set<DecoratedKey> inserted = new HashSet<DecoratedKey>(); for (int j = 0; j < insertsPerTable; j++) { DecoratedKey key = Util.dk(String.valueOf(j)); RowMutation rm = new RowMutation("Keyspace1", key.key); rm.add(new QueryPath(columnFamilyName, null, "0".getBytes()), new byte[0], new TimestampClock(j)); rm.apply(); inserted.add(key); store.forceBlockingFlush(); assertEquals(inserted.size(), Util.getRangeSlice(store).rows.size()); } CompactionManager.instance.submitMajor(store).get(); assertEquals(1, store.getSSTables().size()); }
public void testSpannedIndexPositions() throws IOException, ExecutionException, InterruptedException { RowIndexedReader.BUFFER_SIZE = 40; // each index entry is ~11 bytes, so this will generate lots of spanned entries Table table = Table.open("Keyspace1"); ColumnFamilyStore store = table.getColumnFamilyStore("Standard1"); // insert a bunch of data and compact to a single sstable CompactionManager.instance.disableAutoCompaction(); for (int j = 0; j < 100; j += 2) { byte[] key = String.valueOf(j).getBytes(); RowMutation rm = new RowMutation("Keyspace1", key); rm.add(new QueryPath("Standard1", null, "0".getBytes()), new byte[0], j); rm.apply(); } store.forceBlockingFlush(); CompactionManager.instance.submitMajor(store).get(); // check that all our keys are found correctly SSTableReader sstable = store.getSSTables().iterator().next(); for (int j = 0; j < 100; j += 2) { DecoratedKey dk = Util.dk(String.valueOf(j)); FileDataInput file = sstable.getFileDataInput(dk, DatabaseDescriptor.getIndexedReadBufferSizeInKB() * 1024); DecoratedKey keyInDisk = sstable.getPartitioner().convertFromDiskFormat(FBUtilities.readShortByteArray(file)); assert keyInDisk.equals(dk) : String.format("%s != %s in %s", keyInDisk, dk, file.getPath()); } // check no false positives for (int j = 1; j < 110; j += 2) { DecoratedKey dk = Util.dk(String.valueOf(j)); assert sstable.getPosition(dk) == null; } // check positionsize information assert sstable.indexSummary.getSpannedIndexDataPositions().entrySet().size() > 0; for (Map.Entry<IndexSummary.KeyPosition, SSTable.PositionSize> entry : sstable.indexSummary.getSpannedIndexDataPositions().entrySet()) { IndexSummary.KeyPosition kp = entry.getKey(); SSTable.PositionSize info = entry.getValue(); long nextIndexPosition = kp.indexPosition + 2 + StorageService.getPartitioner().convertToDiskFormat(kp.key).length + 8; BufferedRandomAccessFile indexFile = new BufferedRandomAccessFile(sstable.indexFilename(), "r"); indexFile.seek(nextIndexPosition); String nextKey = indexFile.readUTF(); BufferedRandomAccessFile file = new BufferedRandomAccessFile(sstable.getFilename(), "r"); file.seek(info.position + info.size); assertEquals(nextKey, file.readUTF()); } }
public void testSpannedIndexPositions() throws IOException, ExecutionException, InterruptedException { RowIndexedReader.BUFFER_SIZE = 40; // each index entry is ~11 bytes, so this will generate lots of spanned entries Table table = Table.open("Keyspace1"); ColumnFamilyStore store = table.getColumnFamilyStore("Standard1"); // insert a bunch of data and compact to a single sstable CompactionManager.instance.disableAutoCompaction(); for (int j = 0; j < 100; j += 2) { byte[] key = String.valueOf(j).getBytes(); RowMutation rm = new RowMutation("Keyspace1", key); rm.add(new QueryPath("Standard1", null, "0".getBytes()), new byte[0], new TimestampClock(j)); rm.apply(); } store.forceBlockingFlush(); CompactionManager.instance.submitMajor(store).get(); // check that all our keys are found correctly SSTableReader sstable = store.getSSTables().iterator().next(); for (int j = 0; j < 100; j += 2) { DecoratedKey dk = Util.dk(String.valueOf(j)); FileDataInput file = sstable.getFileDataInput(dk, DatabaseDescriptor.getIndexedReadBufferSizeInKB() * 1024); DecoratedKey keyInDisk = sstable.getPartitioner().convertFromDiskFormat(FBUtilities.readShortByteArray(file)); assert keyInDisk.equals(dk) : String.format("%s != %s in %s", keyInDisk, dk, file.getPath()); } // check no false positives for (int j = 1; j < 110; j += 2) { DecoratedKey dk = Util.dk(String.valueOf(j)); assert sstable.getPosition(dk) == null; } // check positionsize information assert sstable.indexSummary.getSpannedIndexDataPositions().entrySet().size() > 0; for (Map.Entry<IndexSummary.KeyPosition, SSTable.PositionSize> entry : sstable.indexSummary.getSpannedIndexDataPositions().entrySet()) { IndexSummary.KeyPosition kp = entry.getKey(); SSTable.PositionSize info = entry.getValue(); long nextIndexPosition = kp.indexPosition + 2 + StorageService.getPartitioner().convertToDiskFormat(kp.key).length + 8; BufferedRandomAccessFile indexFile = new BufferedRandomAccessFile(sstable.indexFilename(), "r"); indexFile.seek(nextIndexPosition); String nextKey = indexFile.readUTF(); BufferedRandomAccessFile file = new BufferedRandomAccessFile(sstable.getFilename(), "r"); file.seek(info.position + info.size); assertEquals(nextKey, file.readUTF()); } }
public void testRepeatedDatabaseCreationWithAutoStats() throws SQLException { final String DB_NAME = "derby-memory-test"; final File DB_DIR = new File("system", DB_NAME); DataSource ds = JDBCDataSource.getDataSource(DB_NAME); // using -Xmx32M typically causes the out of memory error to appear // within 20 iterations; this program was run on Windows 7 64-bit using // jdk1.6.0_26 int iter = 0; while (iter < 50) { traceit("-- " + iter++); // remove database directory so we can start fresh each time; // the memory leak also manifests when a different directory is // used each time through, i.e. it is not required that the // database be created in the same location over and over if (PrivilegedFileOpsForTests.exists(DB_DIR)) { assertDirectoryDeleted(DB_DIR); } // create the database JDBCDataSource.setBeanProperty(ds, "createDatabase", "create"); Connection conn = ds.getConnection(); JDBCDataSource.clearStringBeanProperty(ds, "createDatabase"); // we'll use this one statement the whole time this db is open Statement s = conn.createStatement(); // create a simple schema; the presence of the index is important // somehow as the memory leak does not appear without it s.executeUpdate("CREATE TABLE TEST (CINT INT)"); s.executeUpdate("CREATE INDEX NDX ON TEST (CINT)"); // perform some updates and queries; it seems that the number of // iterations here is important and that there is a threshold that // must be crossed; e.g. in my tests the memory leak would not // manifest with 105 iterations but it would with 106 iterations for (int i = 0; i < 500; i++) { // both update and query are important; removing either one // causes the memory leak not to appear; the order in which // they are executed, however, does not seem to be important s.executeUpdate("INSERT INTO TEST VALUES(" + i + ")"); s.executeQuery("SELECT * FROM TEST WHERE CINT=" + i).close(); } // done with statement and connection s.close(); conn.close(); // shutdown this database, but not entire derby engine JDBCDataSource.setBeanProperty(ds, "shutdownDatabase", "shutdown"); try { ds.getConnection(); fail("Expected shutdown exception"); } catch (SQLException e) { assertSQLState("08006", e); } finally { JDBCDataSource.clearStringBeanProperty(ds, "shutdownDatabase"); } if (isPhoneME()) { // DERBY-5412: phoneME fails after some iterations because the // number of class names exceeds a VM limit. If we invoke // garbage collection manually, it seems to be able to reclaim // the classes that are no longer in use, and complete the test. Runtime.getRuntime().gc(); } } // extra sanity check making sure that the database was created in the // location we assumed assertTrue(PrivilegedFileOpsForTests.exists(DB_DIR)); }
public void testRepeatedDatabaseCreationWithAutoStats() throws SQLException { final String DB_NAME = "derby-memory-test"; final File DB_DIR = new File("system", DB_NAME); DataSource ds = JDBCDataSource.getDataSource(DB_NAME); // using -Xmx32M typically causes the out of memory error to appear // within 20 iterations; this program was run on Windows 7 64-bit using // jdk1.6.0_26 int iter = 0; while (iter < 50) { println("-- " + iter++); // remove database directory so we can start fresh each time; // the memory leak also manifests when a different directory is // used each time through, i.e. it is not required that the // database be created in the same location over and over if (PrivilegedFileOpsForTests.exists(DB_DIR)) { assertDirectoryDeleted(DB_DIR); } // create the database JDBCDataSource.setBeanProperty(ds, "createDatabase", "create"); Connection conn = ds.getConnection(); JDBCDataSource.clearStringBeanProperty(ds, "createDatabase"); // we'll use this one statement the whole time this db is open Statement s = conn.createStatement(); // create a simple schema; the presence of the index is important // somehow as the memory leak does not appear without it s.executeUpdate("CREATE TABLE TEST (CINT INT)"); s.executeUpdate("CREATE INDEX NDX ON TEST (CINT)"); // perform some updates and queries; it seems that the number of // iterations here is important and that there is a threshold that // must be crossed; e.g. in my tests the memory leak would not // manifest with 105 iterations but it would with 106 iterations for (int i = 0; i < 500; i++) { // both update and query are important; removing either one // causes the memory leak not to appear; the order in which // they are executed, however, does not seem to be important s.executeUpdate("INSERT INTO TEST VALUES(" + i + ")"); s.executeQuery("SELECT * FROM TEST WHERE CINT=" + i).close(); } // done with statement and connection s.close(); conn.close(); // shutdown this database, but not entire derby engine JDBCDataSource.setBeanProperty(ds, "shutdownDatabase", "shutdown"); try { ds.getConnection(); fail("Expected shutdown exception"); } catch (SQLException e) { assertSQLState("08006", e); } finally { JDBCDataSource.clearStringBeanProperty(ds, "shutdownDatabase"); } if (isPhoneME()) { // DERBY-5412: phoneME fails after some iterations because the // number of class names exceeds a VM limit. If we invoke // garbage collection manually, it seems to be able to reclaim // the classes that are no longer in use, and complete the test. Runtime.getRuntime().gc(); } } // extra sanity check making sure that the database was created in the // location we assumed assertTrue(PrivilegedFileOpsForTests.exists(DB_DIR)); }
public void eval(MockDirectoryWrapper dir) throws IOException { // Since we throw exc during abort, eg when IW is // attempting to delete files, we will leave // leftovers: dir.setAssertNoUnrefencedFilesOnClose(false); if (doFail) { StackTraceElement[] trace = new Exception().getStackTrace(); boolean sawAbortOrFlushDoc = false; boolean sawClose = false; boolean sawMerge = false; for (int i = 0; i < trace.length; i++) { if ("abort".equals(trace[i].getMethodName()) || "finishDocument".equals(trace[i].getMethodName())) { sawAbortOrFlushDoc = true; } if ("merge".equals(trace[i])) { sawMerge = true; } if ("close".equals(trace[i].getMethodName())) { sawClose = true; } } if (sawAbortOrFlushDoc && !sawClose && !sawMerge) { if (onlyOnce) doFail = false; //System.out.println(Thread.currentThread().getName() + ": now fail"); //new Throwable().printStackTrace(System.out); throw new IOException("now failing on purpose"); } } }
public void eval(MockDirectoryWrapper dir) throws IOException { // Since we throw exc during abort, eg when IW is // attempting to delete files, we will leave // leftovers: dir.setAssertNoUnrefencedFilesOnClose(false); if (doFail) { StackTraceElement[] trace = new Exception().getStackTrace(); boolean sawAbortOrFlushDoc = false; boolean sawClose = false; boolean sawMerge = false; for (int i = 0; i < trace.length; i++) { if ("abort".equals(trace[i].getMethodName()) || "finishDocument".equals(trace[i].getMethodName())) { sawAbortOrFlushDoc = true; } if ("merge".equals(trace[i].getMethodName())) { sawMerge = true; } if ("close".equals(trace[i].getMethodName())) { sawClose = true; } } if (sawAbortOrFlushDoc && !sawClose && !sawMerge) { if (onlyOnce) doFail = false; //System.out.println(Thread.currentThread().getName() + ": now fail"); //new Throwable().printStackTrace(System.out); throw new IOException("now failing on purpose"); } } }
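The eval() bug is a silent type mismatch: trace[i] is a StackTraceElement, so "merge".equals(trace[i]) compares a String against a StackTraceElement and is always false, meaning sawMerge was never set; the fix compares against getMethodName() like the other two branches. A minimal sketch of the pattern, checking for an illustrative method name:

    public class StackTraceCompareDemo {
        public static void main(String[] args) {
            StackTraceElement[] trace = new Exception().getStackTrace();
            // Always false: a String is never equal to a StackTraceElement.
            System.out.println("main".equals(trace[0]));
            // Compares method names, as in the fixed function.
            System.out.println("main".equals(trace[0].getMethodName())); // true here
        }
    }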
synchronized public void closeDocStore(SegmentWriteState state) throws IOException { final int inc = state.numDocsInStore - lastDocID; if (inc > 0) { initFieldsWriter(); fill(state.numDocsInStore - docWriter.getDocStoreOffset()); } if (fieldsWriter != null) { fieldsWriter.close(); fieldsWriter = null; lastDocID = 0; assert state.docStoreSegmentName != null; state.flushedFiles.add(state.docStoreSegmentName + "." + IndexFileNames.FIELDS_EXTENSION); state.flushedFiles.add(state.docStoreSegmentName + "." + IndexFileNames.FIELDS_INDEX_EXTENSION); state.docWriter.removeOpenFile(state.docStoreSegmentName + "." + IndexFileNames.FIELDS_EXTENSION); state.docWriter.removeOpenFile(state.docStoreSegmentName + "." + IndexFileNames.FIELDS_INDEX_EXTENSION); if (4+state.numDocsInStore*8 != state.directory.fileLength(state.docStoreSegmentName + "." + IndexFileNames.FIELDS_INDEX_EXTENSION)) throw new RuntimeException("after flush: fdx size mismatch: " + state.numDocsInStore + " docs vs " + state.directory.fileLength(state.docStoreSegmentName + "." + IndexFileNames.FIELDS_INDEX_EXTENSION) + " length in bytes of " + state.docStoreSegmentName + "." + IndexFileNames.FIELDS_INDEX_EXTENSION); } }
synchronized public void closeDocStore(SegmentWriteState state) throws IOException { final int inc = state.numDocsInStore - lastDocID; if (inc > 0) { initFieldsWriter(); fill(state.numDocsInStore - docWriter.getDocStoreOffset()); } if (fieldsWriter != null) { fieldsWriter.close(); fieldsWriter = null; lastDocID = 0; assert state.docStoreSegmentName != null; state.flushedFiles.add(state.docStoreSegmentName + "." + IndexFileNames.FIELDS_EXTENSION); state.flushedFiles.add(state.docStoreSegmentName + "." + IndexFileNames.FIELDS_INDEX_EXTENSION); state.docWriter.removeOpenFile(state.docStoreSegmentName + "." + IndexFileNames.FIELDS_EXTENSION); state.docWriter.removeOpenFile(state.docStoreSegmentName + "." + IndexFileNames.FIELDS_INDEX_EXTENSION); if (4+((long) state.numDocsInStore)*8 != state.directory.fileLength(state.docStoreSegmentName + "." + IndexFileNames.FIELDS_INDEX_EXTENSION)) throw new RuntimeException("after flush: fdx size mismatch: " + state.numDocsInStore + " docs vs " + state.directory.fileLength(state.docStoreSegmentName + "." + IndexFileNames.FIELDS_INDEX_EXTENSION) + " length in bytes of " + state.docStoreSegmentName + "." + IndexFileNames.FIELDS_INDEX_EXTENSION); } }
public TermsHashConsumerPerThread addThread(TermsHashPerThread termsHashPerThread) { return new TermVectorsTermsWriterPerThread(termsHashPerThread, this); } void createPostings(RawPostingList[] postings, int start, int count) { final int end = start + count; for(int i=start;i<end;i++) postings[i] = new PostingList(); } synchronized void flush(Map threadsAndFields, final SegmentWriteState state) throws IOException { if (tvx != null) { if (state.numDocsInStore > 0) // In case there are some final documents that we // didn't see (because they hit a non-aborting exception): fill(state.numDocsInStore - docWriter.getDocStoreOffset()); tvx.flush(); tvd.flush(); tvf.flush(); } Iterator it = threadsAndFields.entrySet().iterator(); while(it.hasNext()) { Map.Entry entry = (Map.Entry) it.next(); Iterator it2 = ((Collection) entry.getValue()).iterator(); while(it2.hasNext()) { TermVectorsTermsWriterPerField perField = (TermVectorsTermsWriterPerField) it2.next(); perField.termsHashPerField.reset(); perField.shrinkHash(); } TermVectorsTermsWriterPerThread perThread = (TermVectorsTermsWriterPerThread) entry.getKey(); perThread.termsHashPerThread.reset(true); } } synchronized void closeDocStore(final SegmentWriteState state) throws IOException { if (tvx != null) { // At least one doc in this run had term vectors // enabled fill(state.numDocsInStore - docWriter.getDocStoreOffset()); tvx.close(); tvf.close(); tvd.close(); tvx = null; assert state.docStoreSegmentName != null; if (4+state.numDocsInStore*16 != state.directory.fileLength(state.docStoreSegmentName + "." + IndexFileNames.VECTORS_INDEX_EXTENSION)) throw new RuntimeException("after flush: tvx size mismatch: " + state.numDocsInStore + " docs vs " + state.directory.fileLength(state.docStoreSegmentName + "." + IndexFileNames.VECTORS_INDEX_EXTENSION) + " length in bytes of " + state.docStoreSegmentName + "." + IndexFileNames.VECTORS_INDEX_EXTENSION); state.flushedFiles.add(state.docStoreSegmentName + "." + IndexFileNames.VECTORS_INDEX_EXTENSION); state.flushedFiles.add(state.docStoreSegmentName + "." + IndexFileNames.VECTORS_FIELDS_EXTENSION); state.flushedFiles.add(state.docStoreSegmentName + "." + IndexFileNames.VECTORS_DOCUMENTS_EXTENSION); docWriter.removeOpenFile(state.docStoreSegmentName + "." + IndexFileNames.VECTORS_INDEX_EXTENSION); docWriter.removeOpenFile(state.docStoreSegmentName + "." + IndexFileNames.VECTORS_FIELDS_EXTENSION); docWriter.removeOpenFile(state.docStoreSegmentName + "." + IndexFileNames.VECTORS_DOCUMENTS_EXTENSION); lastDocID = 0; } }
public TermsHashConsumerPerThread addThread(TermsHashPerThread termsHashPerThread) { return new TermVectorsTermsWriterPerThread(termsHashPerThread, this); } void createPostings(RawPostingList[] postings, int start, int count) { final int end = start + count; for(int i=start;i<end;i++) postings[i] = new PostingList(); } synchronized void flush(Map threadsAndFields, final SegmentWriteState state) throws IOException { if (tvx != null) { if (state.numDocsInStore > 0) // In case there are some final documents that we // didn't see (because they hit a non-aborting exception): fill(state.numDocsInStore - docWriter.getDocStoreOffset()); tvx.flush(); tvd.flush(); tvf.flush(); } Iterator it = threadsAndFields.entrySet().iterator(); while(it.hasNext()) { Map.Entry entry = (Map.Entry) it.next(); Iterator it2 = ((Collection) entry.getValue()).iterator(); while(it2.hasNext()) { TermVectorsTermsWriterPerField perField = (TermVectorsTermsWriterPerField) it2.next(); perField.termsHashPerField.reset(); perField.shrinkHash(); } TermVectorsTermsWriterPerThread perThread = (TermVectorsTermsWriterPerThread) entry.getKey(); perThread.termsHashPerThread.reset(true); } } synchronized void closeDocStore(final SegmentWriteState state) throws IOException { if (tvx != null) { // At least one doc in this run had term vectors // enabled fill(state.numDocsInStore - docWriter.getDocStoreOffset()); tvx.close(); tvf.close(); tvd.close(); tvx = null; assert state.docStoreSegmentName != null; if (4+((long) state.numDocsInStore)*16 != state.directory.fileLength(state.docStoreSegmentName + "." + IndexFileNames.VECTORS_INDEX_EXTENSION)) throw new RuntimeException("after flush: tvx size mismatch: " + state.numDocsInStore + " docs vs " + state.directory.fileLength(state.docStoreSegmentName + "." + IndexFileNames.VECTORS_INDEX_EXTENSION) + " length in bytes of " + state.docStoreSegmentName + "." + IndexFileNames.VECTORS_INDEX_EXTENSION); state.flushedFiles.add(state.docStoreSegmentName + "." + IndexFileNames.VECTORS_INDEX_EXTENSION); state.flushedFiles.add(state.docStoreSegmentName + "." + IndexFileNames.VECTORS_FIELDS_EXTENSION); state.flushedFiles.add(state.docStoreSegmentName + "." + IndexFileNames.VECTORS_DOCUMENTS_EXTENSION); docWriter.removeOpenFile(state.docStoreSegmentName + "." + IndexFileNames.VECTORS_INDEX_EXTENSION); docWriter.removeOpenFile(state.docStoreSegmentName + "." + IndexFileNames.VECTORS_FIELDS_EXTENSION); docWriter.removeOpenFile(state.docStoreSegmentName + "." + IndexFileNames.VECTORS_DOCUMENTS_EXTENSION); lastDocID = 0; } }
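The only difference between the buggy and fixed term-vectors flush code above, and likewise in the preceding closeDocStore that checks the fields index (the "fdx size mismatch" check), is the cast in the size comparison: `4 + state.numDocsInStore * 16` is evaluated in 32-bit int arithmetic and overflows once the doc store holds more than about 134 million documents (2^31 / 16), while Directory.fileLength returns a long, so the check could fail spuriously on very large stores. Casting to long before the multiply keeps the whole expression in 64-bit arithmetic. Below is a minimal, self-contained sketch of that difference; the document count is illustrative only, not taken from the data above.

```java
// Sketch of the int-overflow the (long) cast avoids.
public class SizeCheckOverflow {
  public static void main(String[] args) {
    int numDocsInStore = 200_000_000;                       // large doc-store count
    long expectedBuggy = 4 + numDocsInStore * 16;           // int multiply overflows, then widens
    long expectedFixed = 4 + ((long) numDocsInStore) * 16;  // widen first, then multiply
    System.out.println(expectedBuggy);  // -1094967292 (wrong, negative)
    System.out.println(expectedFixed);  // 3200000004
  }
}
```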
public void setBytesValue(BytesRef value) {
  if (!(fieldsData instanceof BytesRef)) {
    throw new IllegalArgumentException("cannot change value type from " + fieldsData.getClass().getSimpleName() + " to BytesRef");
  }
  if (type.indexed()) {
    throw new IllegalArgumentException("cannot set a Reader value on an indexed field");
  }
  fieldsData = value;
}
public void setBytesValue(BytesRef value) {
  if (!(fieldsData instanceof BytesRef)) {
    throw new IllegalArgumentException("cannot change value type from " + fieldsData.getClass().getSimpleName() + " to BytesRef");
  }
  if (type.indexed()) {
    throw new IllegalArgumentException("cannot set a BytesRef value on an indexed field");
  }
  fieldsData = value;
}
public void copyLearnsAsExpected() { RandomUtils.useTestSeed(); final MersenneTwister gen = new MersenneTwister(1); final Exponential exp = new Exponential(.5, gen); Vector beta = new DenseVector(200); for (Vector.Element element : beta) { int sign = 1; if (gen.nextDouble() < 0.5) { sign = -1; } element.set(sign * exp.nextDouble()); } // train one copy of a wrapped learner AdaptiveLogisticRegression.Wrapper w = new AdaptiveLogisticRegression.Wrapper(2, 200, new L1()); for (int i = 0; i < 3000; i++) { AdaptiveLogisticRegression.TrainingExample r = getExample(i, gen, beta); w.train(r); if (i % 1000 == 0) { System.out.printf("%10d %.3f\n", i, w.getLearner().auc()); } } System.out.printf("%10d %.3f\n", 3000, w.getLearner().auc()); double auc1 = w.getLearner().auc(); // then switch to a copy of that learner ... progress should continue AdaptiveLogisticRegression.Wrapper w2 = w.copy(); double auc2; for (int i = 0; i < 5000; i++) { if (i % 1000 == 0) { if (i == 0) { Assert.assertEquals("Should have started with no data", 0.5, w2.getLearner().auc(), 0.0001); } if (i == 1000) { auc2 = w2.getLearner().auc(); Assert.assertTrue("Should have had head-start", Math.abs(auc2 - 0.5) > 0.1); Assert.assertTrue("AUC should improve quickly on copy", auc1 < auc2); } System.out.printf("%10d %.3f\n", i, w2.getLearner().auc()); } AdaptiveLogisticRegression.TrainingExample r = getExample(i, gen, beta); w2.train(r); } Assert.assertEquals("Original should not change after copy is updated", auc1, w.getLearner().auc(), 1e-5); // this improvement is really quite lenient Assert.assertTrue("AUC should improve substantially on copy", auc1 < w2.getLearner().auc() - 0.1); // make sure that the copy didn't lose anything Assert.assertEquals(auc1, w.getLearner().auc(), 0); }
public void copyLearnsAsExpected() { RandomUtils.useTestSeed(); final MersenneTwister gen = new MersenneTwister(1); final Exponential exp = new Exponential(.5, gen); Vector beta = new DenseVector(200); for (Vector.Element element : beta) { int sign = 1; if (gen.nextDouble() < 0.5) { sign = -1; } element.set(sign * exp.nextDouble()); } // train one copy of a wrapped learner AdaptiveLogisticRegression.Wrapper w = new AdaptiveLogisticRegression.Wrapper(2, 200, new L1()); for (int i = 0; i < 3000; i++) { AdaptiveLogisticRegression.TrainingExample r = getExample(i, gen, beta); w.train(r); if (i % 1000 == 0) { System.out.printf("%10d %.3f\n", i, w.getLearner().auc()); } } System.out.printf("%10d %.3f\n", 3000, w.getLearner().auc()); double auc1 = w.getLearner().auc(); // then switch to a copy of that learner ... progress should continue AdaptiveLogisticRegression.Wrapper w2 = w.copy(); double auc2; for (int i = 0; i < 5000; i++) { if (i % 1000 == 0) { if (i == 0) { Assert.assertEquals("Should have started with no data", 0.5, w2.getLearner().auc(), 0.0001); } if (i == 1000) { auc2 = w2.getLearner().auc(); Assert.assertTrue("Should have had head-start", Math.abs(auc2 - 0.5) > 0.1); Assert.assertTrue("AUC should improve quickly on copy", auc1 < auc2); } System.out.printf("%10d %.3f\n", i, w2.getLearner().auc()); } AdaptiveLogisticRegression.TrainingExample r = getExample(i, gen, beta); w2.train(r); } Assert.assertEquals("Original should not change after copy is updated", auc1, w.getLearner().auc(), 1e-5); // this improvement is really quite lenient Assert.assertTrue("AUC should improve significantly on copy", auc1 < w2.getLearner().auc() - 0.05); // make sure that the copy didn't lose anything Assert.assertEquals(auc1, w.getLearner().auc(), 0); }
public void test_errorcode() throws Exception { ResultSet rs = null; Connection conn = getConnection(); Statement s = conn.createStatement(); s.executeUpdate( "create table t(i int, s smallint)"); s.executeUpdate( "insert into t values (1,2)"); s.executeUpdate("insert into t values (1,2)"); s.executeUpdate("insert into t values (null,2)"); //-- parser error //-- bug 5701 assertStatementError("42X01",30000,s,"create table t(i nt, s smallint)"); //-- non-boolean where clause assertStatementError("42X19", 30000, s, "select * from t where i"); // -- invalid correlation name for "*" assertStatementError("42X10",30000, s, "select asdf.* from t"); //-- execution time error assertStatementError("22012",30000,s,"select i/0 from t"); // -- test ErrorMessages VTI rs = s.executeQuery( "select * from SYSCS_DIAG.error_Messages where " + "sql_state = '07000'"); String [][] expRS = new String [][] { {"07000", "At least one parameter to the current statement " + "is uninitialized.", "20000"} }; JDBC.assertFullResultSet(rs,expRS); // Test severe error messages. Existing messages should not change SQLState. // new ones can be added. rs = s.executeQuery("select * from SYSCS_DIAG.Error_messages where SEVERITY >= 40000 order by SQL_STATE"); //Utilities.showResultSet(rs); String [][] expectedRows = {{"08000","Connection closed by unknown interrupt.","40000"}, {"08001","A connection could not be established because the security token is larger than the maximum allowed by the network protocol.","40000"}, {"08001","A connection could not be established because the user id has a length of zero or is larger than the maximum allowed by the network protocol.","40000"}, {"08001","A connection could not be established because the password has a length of zero or is larger than the maximum allowed by the network protocol.","40000"}, {"08001","Required Derby DataSource property {0} not set.","40000"}, {"08001","{0} : Error connecting to server {1} on port {2} with message {3}.","40000"}, {"08001","SocketException: '{0}'","40000"}, {"08001","Unable to open stream on socket: '{0}'.","40000"}, {"08001","User id length ({0}) is outside the range of 1 to {1}.","40000"}, {"08001","Password length ({0}) is outside the range of 1 to {1}.","40000"}, {"08001","User id can not be null.","40000"}, {"08001","Password can not be null.","40000"}, {"08001","A connection could not be established because the database name '{0}' is larger than the maximum length allowed by the network protocol.","40000"}, {"08003","No current connection.","40000"}, {"08003","getConnection() is not valid on a closed PooledConnection.","40000"}, {"08003","Lob method called after connection was closed","40000"}, {"08003","The underlying physical connection is stale or closed.","40000"}, {"08004","Connection refused : {0}","40000"}, {"08004","Connection authentication failure occurred. Reason: {0}.","40000"}, {"08004","The connection was refused because the database {0} was not found.","40000"}, {"08004","Database connection refused.","40000"}, {"08004","User '{0}' cannot shut down database '{1}'. Only the database owner can perform this operation.","40000"}, {"08004","User '{0}' cannot (re)encrypt database '{1}'. Only the database owner can perform this operation.","40000"}, {"08004","User '{0}' cannot hard upgrade database '{1}'. Only the database owner can perform this operation.","40000"}, {"08006","An error occurred during connect reset and the connection has been terminated. 
See chained exceptions for details.","40000"}, {"08006","Database '{0}' shutdown.","45000"}, {"0A000","The DRDA command {0} is not currently implemented. The connection has been terminated.","40000"}, {"28502","The user name '{0}' is not valid.","40000"}, {"57017","There is no available conversion for the source code page, {0}, to the target code page, {1}. The connection has been terminated.","40000"}, {"58009","Network protocol exception: only one of the VCM, VCS length can be greater than 0. The connection has been terminated.","40000"}, {"58009","The connection was terminated because the encoding is not supported.","40000"}, {"58009","Network protocol exception: actual code point, {0}, does not match expected code point, {1}. The connection has been terminated.","40000"}, {"58009","Network protocol exception: DDM collection contains less than 4 bytes of data. The connection has been terminated.","40000"}, {"58009","Network protocol exception: collection stack not empty at end of same id chain parse. The connection has been terminated.","40000"}, {"58009","Network protocol exception: DSS length not 0 at end of same id chain parse. The connection has been terminated.","40000"}, {"58009","Network protocol exception: DSS chained with same id at end of same id chain parse. The connection has been terminated.","40000"}, {"58009","Network protocol exception: end of stream prematurely reached while reading InputStream, parameter #{0}. The connection has been terminated.","40000"}, {"58009","Network protocol exception: invalid FDOCA LID. The connection has been terminated.","40000"}, {"58009","Network protocol exception: SECTKN was not returned. The connection has been terminated.","40000"}, {"58009","Network protocol exception: only one of NVCM, NVCS can be non-null. The connection has been terminated.","40000"}, {"58009","Network protocol exception: SCLDTA length, {0}, is invalid for RDBNAM. The connection has been terminated.","40000"}, {"58009","SocketException: '{0}'","40000"}, {"58009","A communications error has been detected: {0}.","40000"}, {"58009","An error occurred during a deferred connect reset and the connection has been terminated. See chained exceptions for details.","40000"}, {"58009","Insufficient data while reading from the network - expected a minimum of {0} bytes and received only {1} bytes. The connection has been terminated.","40000"}, {"58009","Attempt to fully materialize lob data that is too large for the JVM. The connection has been terminated.","40000"}, {"58009","Network protocol exception: SCLDTA length, {0}, is invalid for RDBCOLID. The connection has been terminated.","40000"}, {"58009","Network protocol exception: SCLDTA length, {0}, is invalid for PKGID. The connection has been terminated.","40000"}, {"58009","Network protocol exception: PKGNAMCSN length, {0}, is invalid at SQLAM {1}. The connection has been terminated.","40000"}, {"58009","A network protocol error was encountered and the connection has been terminated: {0}","40000"}, {"58010","A network protocol error was encountered. A connection could not be established because the manager {0} at level {1} is not supported by the server.","40000"}, {"58014","The DDM command 0x{0} is not supported. The connection has been terminated.","40000"}, {"58015","The DDM object 0x{0} is not supported. The connection has been terminated.","40000"}, {"58016","The DDM parameter 0x{0} is not supported. The connection has been terminated.","40000"}, {"58017","The DDM parameter value 0x{0} is not supported. 
An input host variable may not be within the range the server supports. The connection has been terminated.","40000"}, {"XBM01","Startup failed due to an exception. See next exception for details.","45000"}, {"XBM02","Startup failed due to missing functionality for {0}. Please ensure your classpath includes the correct Derby software.","45000"}, {"XBM03","Supplied value '{0}' for collation attribute is invalid, expecting UCS_BASIC or TERRITORY_BASED.","45000"}, {"XBM05","Startup failed due to missing product version information for {0}.","45000"}, {"XBM06","Startup failed. An encrypted database cannot be accessed without the correct boot password.","45000"}, {"XBM07","Startup failed. Boot password must be at least 8 bytes long.","45000"}, {"XBM08","Could not instantiate {0} StorageFactory class {1}.","45000"}, {"XBM0G","Failed to start encryption engine. Please make sure you are running Java 2 and have downloaded an encryption provider such as jce and put it in your class path.","45000"}, {"XBM0H","Directory {0} cannot be created.","45000"}, {"XBM0I","Directory {0} cannot be removed.","45000"}, {"XBM0J","Directory {0} already exists.","45000"}, {"XBM0K","Unknown sub-protocol for database name {0}.","45000"}, {"XBM0L","Specified authentication scheme class {0} does implement the authentication interface {1}.","45000"}, {"XBM0M","Error creating instance of authentication scheme class {0}.","45000"}, {"XBM0N","JDBC Driver registration with java.sql.DriverManager failed. See next exception for details.","45000"}, {"XBM0P","Service provider is read-only. Operation not permitted.","45000"}, {"XBM0Q","File {0} not found. Please make sure that backup copy is the correct one and it is not corrupted.","45000"}, {"XBM0R","Unable to remove File {0}.","45000"}, {"XBM0S","Unable to rename file '{0}' to '{1}'","45000"}, {"XBM0T","Ambiguous sub-protocol for database name {0}.","45000"}, {"XBM0X","Supplied territory description '{0}' is invalid, expecting ln[_CO[_variant]]\nln=lower-case two-letter ISO-639 language code, CO=upper-case two-letter ISO-3166 country codes, see java.util.Locale.","45000"}, {"XBM0Y","Backup database directory {0} not found. Please make sure that the specified backup path is right.","45000"}, {"XBM0Z","Unable to copy file '{0}' to '{1}'. 
Please make sure that there is enough space and permissions are correct.","45000"}, {"XCW00","Unsupported upgrade from '{0}' to '{1}'.","45000"}, {"XJ004","Database '{0}' not found.","40000"}, {"XJ015","Derby system shutdown.","50000"}, {"XJ028","The URL '{0}' is not properly formed.","40000"}, {"XJ040","Failed to start database '{0}', see the next exception for details.","40000"}, {"XJ041","Failed to create database '{0}', see the next exception for details.","40000"}, {"XJ049","Conflicting create attributes specified.","40000"}, {"XJ05B","JDBC attribute '{0}' has an invalid value '{1}', valid values are '{2}'.","40000"}, {"XJ081","Conflicting create/restore/recovery attributes specified.","40000"}, {"XJ213","The traceLevel connection property does not have a valid format for a number.","40000"}, {"XSDB0","Unexpected exception on in-memory page {0}","45000"}, {"XSDB1","Unknown page format at page {0}","45000"}, {"XSDB2","Unknown container format at container {0} : {1}","45000"}, {"XSDB3","Container information cannot change once written: was {0}, now {1}","45000"}, {"XSDB4","Page {0} is at version {1}, the log file contains change version {2}, either there are log records of this page missing, or this page did not get written out to disk properly.","45000"}, {"XSDB5","Log has change record on page {0}, which is beyond the end of the container.","45000"}, {"XSDB6","Another instance of Derby may have already booted the database {0}.","45000"}, {"XSDB7","WARNING: Derby (instance {0}) is attempting to boot the database {1} even though Derby (instance {2}) may still be active. Only one instance of Derby should boot a database at a time. Severe and non-recoverable corruption can result and may have already occurred.","45000"}, {"XSDB8","WARNING: Derby (instance {0}) is attempting to boot the database {1} even though Derby (instance {2}) may still be active. Only one instance of Derby should boot a database at a time. Severe and non-recoverable corruption can result if 2 instances of Derby boot on the same database at the same time. The db2j.database.forceDatabaseLock=true property has been set, so the database will not boot until the db.lck is no longer present. Normally this file is removed when the first instance of Derby to boot on the database exits, but it may be left behind in some shutdowns. It will be necessary to remove the file by hand in that case. It is important to verify that no other VM is accessing the database before deleting the db.lck file by hand.","45000"}, {"XSDB9","Stream container {0} is corrupt.","45000"}, {"XSDBA","Attempt to allocate object {0} failed.","45000"}, {"XSDG0","Page {0} could not be read from disk.","45000"}, {"XSDG1","Page {0} could not be written to disk, please check if disk is full.","45000"}, {"XSDG2","Invalid checksum on Page {0}, expected={1}, on-disk version={2}, page dump follows: {3}","45000"}, {"XSDG3","Meta-data for Container {0} could not be accessed","45000"}, {"XSDG5","Database is not in create mode when createFinished is called.","45000"}, {"XSDG6","Data segment directory not found in {0} backup during restore. Please make sure that backup copy is the right one and it is not corrupted.","45000"}, {"XSDG7","Directory {0} could not be removed during restore. Please make sure that permissions are correct.","45000"}, {"XSDG8","Unable to copy directory '{0}' to '{1}' during restore. 
Please make sure that there is enough space and permissions are correct.","45000"}, {"XSLA0","Cannot flush the log file to disk {0}.","45000"}, {"XSLA1","Log Record has been sent to the stream, but it cannot be applied to the store (Object {0}). This may cause recovery problems also.","45000"}, {"XSLA2","System will shutdown, got I/O Exception while accessing log file.","45000"}, {"XSLA3","Log Corrupted, has invalid data in the log stream.","45000"}, {"XSLA4","Cannot write to the log, most likely the log is full. Please delete unnecessary files. It is also possible that the file system is read only, or the disk has failed, or some other problems with the media.","45000"}, {"XSLA5","Cannot read log stream for some reason to rollback transaction {0}.","45000"}, {"XSLA6","Cannot recover the database.","45000"}, {"XSLA7","Cannot redo operation {0} in the log.","45000"}, {"XSLA8","Cannot rollback transaction {0}, trying to compensate {1} operation with {2}","45000"}, {"XSLAA","The store has been marked for shutdown by an earlier exception.","45000"}, {"XSLAB","Cannot find log file {0}, please make sure your logDevice property is properly set with the correct path separator for your platform.","45000"}, {"XSLAC","Database at {0} have incompatible format with the current version of software, it may have been created by or upgraded by a later version.","45000"}, {"XSLAD","log Record at instant {2} in log file {3} corrupted. Expected log record length {0}, real length {1}.","45000"}, {"XSLAE","Control file at {0} cannot be written or updated.","45000"}, {"XSLAF","A Read Only database was created with dirty data buffers.","45000"}, {"XSLAH","A Read Only database is being updated.","45000"}, {"XSLAI","Cannot log the checkpoint log record","45000"}, {"XSLAJ","The logging system has been marked to shut down due to an earlier problem and will not allow any more operations until the system shuts down and restarts.","45000"}, {"XSLAK","Database has exceeded largest log file number {0}.","45000"}, {"XSLAL","log record size {2} exceeded the maximum allowable log file size {3}. Error encountered in log file {0}, position {1}.","45000"}, {"XSLAM","Cannot verify database format at {1} due to IOException.","45000"}, {"XSLAN","Database at {0} has an incompatible format with the current version of the software. The database was created by or upgraded by version {1}.","45000"}, {"XSLAO","Recovery failed unexpected problem {0}.","45000"}, {"XSLAP","Database at {0} is at version {1}. Beta databases cannot be upgraded,","45000"}, {"XSLAQ","cannot create log file at directory {0}.","45000"}, {"XSLAR","Unable to copy log file '{0}' to '{1}' during restore. Please make sure that there is enough space and permissions are correct.","45000"}, {"XSLAS","Log directory {0} not found in backup during restore. Please make sure that backup copy is the correct one and it is not corrupted.","45000"}, {"XSLAT","The log directory '{0}' exists. The directory might belong to another database. 
Check that the location specified for the logDevice attribute is correct.","45000"}, {"XSTB0","An exception was thrown during transaction abort.","50000"}, {"XSTB2","Cannot log transaction changes, maybe trying to write to a read only database.","50000"}, {"XSTB3","Cannot abort transaction because the log manager is null, probably due to an earlier error.","50000"}, {"XSTB5","Creating database with logging disabled encountered unexpected problem.","50000"}, {"XSTB6","Cannot substitute a transaction table with another while one is already in use.","50000"}, {"XXXXX","Normal database session close.","40000"}}; JDBC.assertFullResultSet(rs, expectedRows); conn.rollback(); s.close(); }
public void test_errorcode() throws Exception { ResultSet rs = null; Connection conn = getConnection(); Statement s = conn.createStatement(); s.executeUpdate( "create table t(i int, s smallint)"); s.executeUpdate( "insert into t values (1,2)"); s.executeUpdate("insert into t values (1,2)"); s.executeUpdate("insert into t values (null,2)"); //-- parser error //-- bug 5701 assertStatementError("42X01",30000,s,"create table t(i nt, s smallint)"); //-- non-boolean where clause assertStatementError("42X19", 30000, s, "select * from t where i"); // -- invalid correlation name for "*" assertStatementError("42X10",30000, s, "select asdf.* from t"); //-- execution time error assertStatementError("22012",30000,s,"select i/0 from t"); // -- test ErrorMessages VTI rs = s.executeQuery( "select * from SYSCS_DIAG.error_Messages where " + "sql_state = '07000'"); String [][] expRS = new String [][] { {"07000", "At least one parameter to the current statement " + "is uninitialized.", "20000"} }; JDBC.assertFullResultSet(rs,expRS); // Test severe error messages. Existing messages should not change SQLState. // new ones can be added. rs = s.executeQuery("select * from SYSCS_DIAG.Error_messages where SEVERITY >= 40000 order by SQL_STATE"); //Utilities.showResultSet(rs); String [][] expectedRows = {{"08000","Connection closed by unknown interrupt.","40000"}, {"08001","A connection could not be established because the security token is larger than the maximum allowed by the network protocol.","40000"}, {"08001","A connection could not be established because the user id has a length of zero or is larger than the maximum allowed by the network protocol.","40000"}, {"08001","A connection could not be established because the password has a length of zero or is larger than the maximum allowed by the network protocol.","40000"}, {"08001","Required Derby DataSource property {0} not set.","40000"}, {"08001","{0} : Error connecting to server {1} on port {2} with message {3}.","40000"}, {"08001","SocketException: '{0}'","40000"}, {"08001","Unable to open stream on socket: '{0}'.","40000"}, {"08001","User id length ({0}) is outside the range of 1 to {1}.","40000"}, {"08001","Password length ({0}) is outside the range of 1 to {1}.","40000"}, {"08001","User id can not be null.","40000"}, {"08001","Password can not be null.","40000"}, {"08001","A connection could not be established because the database name '{0}' is larger than the maximum length allowed by the network protocol.","40000"}, {"08003","No current connection.","40000"}, {"08003","getConnection() is not valid on a closed PooledConnection.","40000"}, {"08003","Lob method called after connection was closed","40000"}, {"08003","The underlying physical connection is stale or closed.","40000"}, {"08004","Connection refused : {0}","40000"}, {"08004","Connection authentication failure occurred. Reason: {0}.","40000"}, {"08004","The connection was refused because the database {0} was not found.","40000"}, {"08004","Database connection refused.","40000"}, {"08004","User '{0}' cannot shut down database '{1}'. Only the database owner can perform this operation.","40000"}, {"08004","User '{0}' cannot (re)encrypt database '{1}'. Only the database owner can perform this operation.","40000"}, {"08004","User '{0}' cannot hard upgrade database '{1}'. Only the database owner can perform this operation.","40000"}, {"08006","An error occurred during connect reset and the connection has been terminated. 
See chained exceptions for details.","40000"}, {"08006","Database '{0}' shutdown.","45000"}, {"0A000","The DRDA command {0} is not currently implemented. The connection has been terminated.","40000"}, {"28502","The user name '{0}' is not valid.","40000"}, {"57017","There is no available conversion for the source code page, {0}, to the target code page, {1}. The connection has been terminated.","40000"}, {"58009","Network protocol exception: only one of the VCM, VCS length can be greater than 0. The connection has been terminated.","40000"}, {"58009","The connection was terminated because the encoding is not supported.","40000"}, {"58009","Network protocol exception: actual code point, {0}, does not match expected code point, {1}. The connection has been terminated.","40000"}, {"58009","Network protocol exception: DDM collection contains less than 4 bytes of data. The connection has been terminated.","40000"}, {"58009","Network protocol exception: collection stack not empty at end of same id chain parse. The connection has been terminated.","40000"}, {"58009","Network protocol exception: DSS length not 0 at end of same id chain parse. The connection has been terminated.","40000"}, {"58009","Network protocol exception: DSS chained with same id at end of same id chain parse. The connection has been terminated.","40000"}, {"58009","Network protocol exception: end of stream prematurely reached while reading InputStream, parameter #{0}. The connection has been terminated.","40000"}, {"58009","Network protocol exception: invalid FDOCA LID. The connection has been terminated.","40000"}, {"58009","Network protocol exception: SECTKN was not returned. The connection has been terminated.","40000"}, {"58009","Network protocol exception: only one of NVCM, NVCS can be non-null. The connection has been terminated.","40000"}, {"58009","Network protocol exception: SCLDTA length, {0}, is invalid for RDBNAM. The connection has been terminated.","40000"}, {"58009","SocketException: '{0}'","40000"}, {"58009","A communications error has been detected: {0}.","40000"}, {"58009","An error occurred during a deferred connect reset and the connection has been terminated. See chained exceptions for details.","40000"}, {"58009","Insufficient data while reading from the network - expected a minimum of {0} bytes and received only {1} bytes. The connection has been terminated.","40000"}, {"58009","Attempt to fully materialize lob data that is too large for the JVM. The connection has been terminated.","40000"}, {"58009","Network protocol exception: SCLDTA length, {0}, is invalid for RDBCOLID. The connection has been terminated.","40000"}, {"58009","Network protocol exception: SCLDTA length, {0}, is invalid for PKGID. The connection has been terminated.","40000"}, {"58009","Network protocol exception: PKGNAMCSN length, {0}, is invalid at SQLAM {1}. The connection has been terminated.","40000"}, {"58009","A network protocol error was encountered and the connection has been terminated: {0}","40000"}, {"58010","A network protocol error was encountered. A connection could not be established because the manager {0} at level {1} is not supported by the server.","40000"}, {"58014","The DDM command 0x{0} is not supported. The connection has been terminated.","40000"}, {"58015","The DDM object 0x{0} is not supported. The connection has been terminated.","40000"}, {"58016","The DDM parameter 0x{0} is not supported. The connection has been terminated.","40000"}, {"58017","The DDM parameter value 0x{0} is not supported. 
An input host variable may not be within the range the server supports. The connection has been terminated.","40000"}, {"XBM01","Startup failed due to an exception. See next exception for details.","45000"}, {"XBM02","Startup failed due to missing functionality for {0}. Please ensure your classpath includes the correct Derby software.","45000"}, {"XBM03","Supplied value '{0}' for collation attribute is invalid, expecting UCS_BASIC or TERRITORY_BASED.","45000"}, {"XBM05","Startup failed due to missing product version information for {0}.","45000"}, {"XBM06","Startup failed. An encrypted database cannot be accessed without the correct boot password.","45000"}, {"XBM07","Startup failed. Boot password must be at least 8 bytes long.","45000"}, {"XBM08","Could not instantiate {0} StorageFactory class {1}.","45000"}, {"XBM0G","Failed to start encryption engine. Please make sure you are running Java 2 and have downloaded an encryption provider such as jce and put it in your class path.","45000"}, {"XBM0H","Directory {0} cannot be created.","45000"}, {"XBM0I","Directory {0} cannot be removed.","45000"}, {"XBM0J","Directory {0} already exists.","45000"}, {"XBM0K","Unknown sub-protocol for database name {0}.","45000"}, {"XBM0L","Specified authentication scheme class {0} does implement the authentication interface {1}.","45000"}, {"XBM0M","Error creating instance of authentication scheme class {0}.","45000"}, {"XBM0N","JDBC Driver registration with java.sql.DriverManager failed. See next exception for details.","45000"}, {"XBM0P","Service provider is read-only. Operation not permitted.","45000"}, {"XBM0Q","File {0} not found. Please make sure that backup copy is the correct one and it is not corrupted.","45000"}, {"XBM0R","Unable to remove File {0}.","45000"}, {"XBM0S","Unable to rename file '{0}' to '{1}'","45000"}, {"XBM0T","Ambiguous sub-protocol for database name {0}.","45000"}, {"XBM0X","Supplied territory description '{0}' is invalid, expecting ln[_CO[_variant]]\nln=lower-case two-letter ISO-639 language code, CO=upper-case two-letter ISO-3166 country codes, see java.util.Locale.","45000"}, {"XBM0Y","Backup database directory {0} not found. Please make sure that the specified backup path is right.","45000"}, {"XBM0Z","Unable to copy file '{0}' to '{1}'. 
Please make sure that there is enough space and permissions are correct.","45000"}, {"XCW00","Unsupported upgrade from '{0}' to '{1}'.","45000"}, {"XJ004","Database '{0}' not found.","40000"}, {"XJ015","Derby system shutdown.","50000"}, {"XJ028","The URL '{0}' is not properly formed.","40000"}, {"XJ040","Failed to start database '{0}', see the next exception for details.","40000"}, {"XJ041","Failed to create database '{0}', see the next exception for details.","40000"}, {"XJ049","Conflicting create attributes specified.","40000"}, {"XJ05B","JDBC attribute '{0}' has an invalid value '{1}', valid values are '{2}'.","40000"}, {"XJ081","Conflicting create/restore/recovery attributes specified.","40000"}, {"XJ213","The traceLevel connection property does not have a valid format for a number.","40000"}, {"XSDB0","Unexpected exception on in-memory page {0}","45000"}, {"XSDB1","Unknown page format at page {0}","45000"}, {"XSDB2","Unknown container format at container {0} : {1}","45000"}, {"XSDB3","Container information cannot change once written: was {0}, now {1}","45000"}, {"XSDB4","Page {0} is at version {1}, the log file contains change version {2}, either there are log records of this page missing, or this page did not get written out to disk properly.","45000"}, {"XSDB5","Log has change record on page {0}, which is beyond the end of the container.","45000"}, {"XSDB6","Another instance of Derby may have already booted the database {0}.","45000"}, {"XSDB7","WARNING: Derby (instance {0}) is attempting to boot the database {1} even though Derby (instance {2}) may still be active. Only one instance of Derby should boot a database at a time. Severe and non-recoverable corruption can result and may have already occurred.","45000"}, {"XSDB8","WARNING: Derby (instance {0}) is attempting to boot the database {1} even though Derby (instance {2}) may still be active. Only one instance of Derby should boot a database at a time. Severe and non-recoverable corruption can result if 2 instances of Derby boot on the same database at the same time. The db2j.database.forceDatabaseLock=true property has been set, so the database will not boot until the db.lck is no longer present. Normally this file is removed when the first instance of Derby to boot on the database exits, but it may be left behind in some shutdowns. It will be necessary to remove the file by hand in that case. It is important to verify that no other VM is accessing the database before deleting the db.lck file by hand.","45000"}, {"XSDB9","Stream container {0} is corrupt.","45000"}, {"XSDBA","Attempt to allocate object {0} failed.","45000"}, {"XSDG0","Page {0} could not be read from disk.","45000"}, {"XSDG1","Page {0} could not be written to disk, please check if disk is full.","45000"}, {"XSDG2","Invalid checksum on Page {0}, expected={1}, on-disk version={2}, page dump follows: {3}","45000"}, {"XSDG3","Meta-data for Container {0} could not be accessed","45000"}, {"XSDG5","Database is not in create mode when createFinished is called.","45000"}, {"XSDG6","Data segment directory not found in {0} backup during restore. Please make sure that backup copy is the right one and it is not corrupted.","45000"}, {"XSDG7","Directory {0} could not be removed during restore. Please make sure that permissions are correct.","45000"}, {"XSDG8","Unable to copy directory '{0}' to '{1}' during restore. 
Please make sure that there is enough space and permissions are correct.","45000"}, {"XSLA0","Cannot flush the log file to disk {0}.","45000"}, {"XSLA1","Log Record has been sent to the stream, but it cannot be applied to the store (Object {0}). This may cause recovery problems also.","45000"}, {"XSLA2","System will shutdown, got I/O Exception while accessing log file.","45000"}, {"XSLA3","Log Corrupted, has invalid data in the log stream.","45000"}, {"XSLA4","Cannot write to the log, most likely the log is full. Please delete unnecessary files. It is also possible that the file system is read only, or the disk has failed, or some other problems with the media.","45000"}, {"XSLA5","Cannot read log stream for some reason to rollback transaction {0}.","45000"}, {"XSLA6","Cannot recover the database.","45000"}, {"XSLA7","Cannot redo operation {0} in the log.","45000"}, {"XSLA8","Cannot rollback transaction {0}, trying to compensate {1} operation with {2}","45000"}, {"XSLAA","The store has been marked for shutdown by an earlier exception.","45000"}, {"XSLAB","Cannot find log file {0}, please make sure your logDevice property is properly set with the correct path separator for your platform.","45000"}, {"XSLAC","Database at {0} have incompatible format with the current version of software, it may have been created by or upgraded by a later version.","45000"}, {"XSLAD","log Record at instant {2} in log file {3} corrupted. Expected log record length {0}, real length {1}.","45000"}, {"XSLAE","Control file at {0} cannot be written or updated.","45000"}, {"XSLAF","A Read Only database was created with dirty data buffers.","45000"}, {"XSLAH","A Read Only database is being updated.","45000"}, {"XSLAI","Cannot log the checkpoint log record","45000"}, {"XSLAJ","The logging system has been marked to shut down due to an earlier problem and will not allow any more operations until the system shuts down and restarts.","45000"}, {"XSLAK","Database has exceeded largest log file number {0}.","45000"}, {"XSLAL","log record size {2} exceeded the maximum allowable log file size {3}. Error encountered in log file {0}, position {1}.","45000"}, {"XSLAM","Cannot verify database format at {1} due to IOException.","45000"}, {"XSLAN","Database at {0} has an incompatible format with the current version of the software. The database was created by or upgraded by version {1}.","45000"}, {"XSLAO","Recovery failed unexpected problem {0}.","45000"}, {"XSLAP","Database at {0} is at version {1}. Beta databases cannot be upgraded,","45000"}, {"XSLAQ","cannot create log file at directory {0}.","45000"}, {"XSLAR","Unable to copy log file '{0}' to '{1}' during restore. Please make sure that there is enough space and permissions are correct.","45000"}, {"XSLAS","Log directory {0} not found in backup during restore. Please make sure that backup copy is the correct one and it is not corrupted.","45000"}, {"XSLAT","The log directory '{0}' exists. The directory might belong to another database. 
Check that the location specified for the logDevice attribute is correct.","45000"}, {"XSTB0","An exception was thrown during transaction abort.","50000"}, {"XSTB2","Cannot log transaction changes, maybe trying to write to a read only database.","50000"}, {"XSTB3","Cannot abort transaction because the log manager is null, probably due to an earlier error.","50000"}, {"XSTB5","Creating database with logging disabled encountered unexpected problem.","50000"}, {"XSTB6","Cannot substitute a transaction table with another while one is already in use.","50000"}, {"XXXXX","Normal database session close.","40000"}}; JDBC.assertUnorderedResultSet(rs, expectedRows); conn.rollback(); s.close(); }
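The only change between the two test_errorcode versions above is the final assertion: JDBC.assertFullResultSet, which (as the name suggests) compares rows in exactly the listed order, is replaced by JDBC.assertUnorderedResultSet. The query orders only by SQL_STATE, and many states (08001, 08004, 58009, ...) appear with several different messages, so the relative order of those rows is not pinned down by the query. Below is a minimal sketch of an order-insensitive result-set check; the class and method names are hypothetical stand-ins for the Derby test utility's real helper.

```java
// Order-insensitive comparison of a ResultSet against expected rows (sketch).
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

final class UnorderedResultSetCheck {
  static void assertUnordered(ResultSet rs, String[][] expected) throws SQLException {
    // Copy the expected rows so each may be matched (and removed) exactly once.
    List<List<String>> remaining = new ArrayList<>();
    for (String[] row : expected) {
      remaining.add(new ArrayList<>(Arrays.asList(row)));
    }
    ResultSetMetaData meta = rs.getMetaData();
    int cols = meta.getColumnCount();
    while (rs.next()) {
      List<String> actual = new ArrayList<>();
      for (int c = 1; c <= cols; c++) {
        actual.add(rs.getString(c));
      }
      // Each actual row must match one still-unmatched expected row, in any order.
      if (!remaining.remove(actual)) {
        throw new AssertionError("unexpected row: " + actual);
      }
    }
    if (!remaining.isEmpty()) {
      throw new AssertionError("missing rows: " + remaining);
    }
  }
}
```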
public void testDiskFull() throws IOException { Term searchTerm = new Term("content", "aaa"); int START_COUNT = 157; int END_COUNT = 144; // First build up a starting index: MockDirectoryWrapper startDir = newDirectory(); IndexWriter writer = new IndexWriter(startDir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); if (VERBOSE) { System.out.println("TEST: create initial index"); writer.setInfoStream(System.out); } for(int i=0;i<157;i++) { Document d = new Document(); d.add(newField("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED)); d.add(newField("content", "aaa " + i, Field.Store.NO, Field.Index.ANALYZED)); writer.addDocument(d); } writer.close(); long diskUsage = startDir.getRecomputedActualSizeInBytes(); long diskFree = diskUsage+100; IOException err = null; boolean done = false; // Iterate w/ ever increasing free disk space: while(!done) { MockDirectoryWrapper dir = new MockDirectoryWrapper(random, new RAMDirectory(startDir)); // If IndexReader hits disk full, it can write to // the same files again. dir.setPreventDoubleWrite(false); IndexReader reader = IndexReader.open(dir, false); // For each disk size, first try to commit against // dir that will hit random IOExceptions & disk // full; after, give it infinite disk space & turn // off random IOExceptions & retry w/ same reader: boolean success = false; for(int x=0;x<2;x++) { double rate = 0.05; double diskRatio = ((double) diskFree)/diskUsage; long thisDiskFree; String testName; if (0 == x) { thisDiskFree = diskFree; if (diskRatio >= 2.0) { rate /= 2; } if (diskRatio >= 4.0) { rate /= 2; } if (diskRatio >= 6.0) { rate = 0.0; } if (VERBOSE) { System.out.println("\ncycle: " + diskFree + " bytes"); } testName = "disk full during reader.close() @ " + thisDiskFree + " bytes"; } else { thisDiskFree = 0; rate = 0.0; if (VERBOSE) { System.out.println("\ncycle: same writer: unlimited disk space"); } testName = "reader re-use after disk full"; } dir.setMaxSizeInBytes(thisDiskFree); dir.setRandomIOExceptionRate(rate); try { if (0 == x) { int docId = 12; for(int i=0;i<13;i++) { reader.deleteDocument(docId); reader.setNorm(docId, "contents", (float) 2.0); docId += 12; } } reader.close(); success = true; if (0 == x) { done = true; } } catch (IOException e) { if (VERBOSE) { System.out.println(" hit IOException: " + e); e.printStackTrace(System.out); } err = e; if (1 == x) { e.printStackTrace(); fail(testName + " hit IOException after disk space was freed up"); } } // Finally, verify index is not corrupt, and, if // we succeeded, we see all docs changed, and if // we failed, we see either all docs or no docs // changed (transactional semantics): IndexReader newReader = null; try { newReader = IndexReader.open(dir, false); } catch (IOException e) { e.printStackTrace(); fail(testName + ":exception when creating IndexReader after disk full during close: " + e); } /* int result = newReader.docFreq(searchTerm); if (success) { if (result != END_COUNT) { fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + END_COUNT); } } else { // On hitting exception we still may have added // all docs: if (result != START_COUNT && result != END_COUNT) { err.printStackTrace(); fail(testName + ": method did throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT + " or " + END_COUNT); } } */ IndexSearcher searcher = newSearcher(newReader); ScoreDoc[] hits = null; try { hits = searcher.search(new TermQuery(searchTerm), null, 
1000).scoreDocs; } catch (IOException e) { e.printStackTrace(); fail(testName + ": exception when searching: " + e); } int result2 = hits.length; if (success) { if (result2 != END_COUNT) { fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + END_COUNT); } } else { // On hitting exception we still may have added // all docs: if (result2 != START_COUNT && result2 != END_COUNT) { err.printStackTrace(); fail(testName + ": method did throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + START_COUNT); } } searcher.close(); newReader.close(); if (result2 == END_COUNT) { break; } } dir.close(); // Try again with 10 more bytes of free space: diskFree += 10; } startDir.close(); }
public void testDiskFull() throws IOException { Term searchTerm = new Term("content", "aaa"); int START_COUNT = 157; int END_COUNT = 144; // First build up a starting index: MockDirectoryWrapper startDir = newDirectory(); IndexWriter writer = new IndexWriter(startDir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); if (VERBOSE) { System.out.println("TEST: create initial index"); writer.setInfoStream(System.out); } for(int i=0;i<157;i++) { Document d = new Document(); d.add(newField("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED)); d.add(newField("content", "aaa " + i, Field.Store.NO, Field.Index.ANALYZED)); writer.addDocument(d); } writer.close(); long diskUsage = startDir.getRecomputedActualSizeInBytes(); long diskFree = diskUsage+100; IOException err = null; boolean done = false; // Iterate w/ ever increasing free disk space: while(!done) { MockDirectoryWrapper dir = new MockDirectoryWrapper(random, new RAMDirectory(startDir)); // If IndexReader hits disk full, it can write to // the same files again. dir.setPreventDoubleWrite(false); IndexReader reader = IndexReader.open(dir, false); // For each disk size, first try to commit against // dir that will hit random IOExceptions & disk // full; after, give it infinite disk space & turn // off random IOExceptions & retry w/ same reader: boolean success = false; for(int x=0;x<2;x++) { double rate = 0.05; double diskRatio = ((double) diskFree)/diskUsage; long thisDiskFree; String testName; if (0 == x) { thisDiskFree = diskFree; if (diskRatio >= 2.0) { rate /= 2; } if (diskRatio >= 4.0) { rate /= 2; } if (diskRatio >= 6.0) { rate = 0.0; } if (VERBOSE) { System.out.println("\ncycle: " + diskFree + " bytes"); } testName = "disk full during reader.close() @ " + thisDiskFree + " bytes"; } else { thisDiskFree = 0; rate = 0.0; if (VERBOSE) { System.out.println("\ncycle: same writer: unlimited disk space"); } testName = "reader re-use after disk full"; } dir.setMaxSizeInBytes(thisDiskFree); dir.setRandomIOExceptionRate(rate); try { if (0 == x) { int docId = 12; for(int i=0;i<13;i++) { reader.deleteDocument(docId); reader.setNorm(docId, "content", (float) 2.0); docId += 12; } } reader.close(); success = true; if (0 == x) { done = true; } } catch (IOException e) { if (VERBOSE) { System.out.println(" hit IOException: " + e); e.printStackTrace(System.out); } err = e; if (1 == x) { e.printStackTrace(); fail(testName + " hit IOException after disk space was freed up"); } } // Finally, verify index is not corrupt, and, if // we succeeded, we see all docs changed, and if // we failed, we see either all docs or no docs // changed (transactional semantics): IndexReader newReader = null; try { newReader = IndexReader.open(dir, false); } catch (IOException e) { e.printStackTrace(); fail(testName + ":exception when creating IndexReader after disk full during close: " + e); } /* int result = newReader.docFreq(searchTerm); if (success) { if (result != END_COUNT) { fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + END_COUNT); } } else { // On hitting exception we still may have added // all docs: if (result != START_COUNT && result != END_COUNT) { err.printStackTrace(); fail(testName + ": method did throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT + " or " + END_COUNT); } } */ IndexSearcher searcher = newSearcher(newReader); ScoreDoc[] hits = null; try { hits = searcher.search(new TermQuery(searchTerm), null, 
1000).scoreDocs; } catch (IOException e) { e.printStackTrace(); fail(testName + ": exception when searching: " + e); } int result2 = hits.length; if (success) { if (result2 != END_COUNT) { fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + END_COUNT); } } else { // On hitting exception we still may have added // all docs: if (result2 != START_COUNT && result2 != END_COUNT) { err.printStackTrace(); fail(testName + ": method did throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + START_COUNT); } } searcher.close(); newReader.close(); if (result2 == END_COUNT) { break; } } dir.close(); // Try again with 10 more bytes of free space: diskFree += 10; } startDir.close(); }
public int hashCode() {
  long h = 0x98761234;  // something non-zero for length==0
  for (int i = bits.length; --i>=0;) {
    h ^= bits[i];
    h = (h << 1) | (h >>> 31); // rotate left
  }
  return (int)((h>>32) ^ h);  // fold leftmost bits into right
}
public int hashCode() {
  long h = 0x98761234;  // something non-zero for length==0
  for (int i = bits.length; --i>=0;) {
    h ^= bits[i];
    h = (h << 1) | (h >>> 63); // rotate left
  }
  return (int)((h>>32) ^ h);  // fold leftmost bits into right
}
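In the hashCode pair above, the bits array evidently holds 64-bit words, so rotating h left by one bit must bring the top bit back in from position 63; `h >>> 31` (the 32-bit version of the same idiom) mixes in the wrong bit and weakens the hash. A small sketch, assuming only java.lang.Long, showing that the fixed expression matches the library rotation while the buggy one does not:

```java
// Compares the hand-rolled 64-bit rotation against Long.rotateLeft for a sample value.
public class RotateCheck {
  public static void main(String[] args) {
    long h = 0x8000000000000001L;                       // top and bottom bits set
    long fixed = (h << 1) | (h >>> 63);                 // true 64-bit rotate left by 1
    long buggy = (h << 1) | (h >>> 31);                 // brings in bit 31, not bit 63
    System.out.println(fixed == Long.rotateLeft(h, 1)); // true
    System.out.println(buggy == Long.rotateLeft(h, 1)); // false
  }
}
```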
public void basicUsageTest() throws Exception {
  SolrXMLSerializer serializer = new SolrXMLSerializer();

  SolrXMLDef solrXMLDef = getTestSolrXMLDef(defaultCoreNameKey,
      defaultCoreNameVal, peristentKey, persistentVal, sharedLibKey,
      sharedLibVal, adminPathKey, adminPathVal, shareSchemaKey,
      shareSchemaVal, instanceDirKey, instanceDirVal);

  Writer w = new StringWriter();
  try {
    serializer.persist(w, solrXMLDef);
  } finally {
    w.close();
  }

  assertResults(((StringWriter) w).getBuffer().toString().getBytes("UTF-8"));

  // again with default file
  File tmpFile = _TestUtil.getTempDir("solr.xml");

  serializer.persistFile(tmpFile, solrXMLDef);

  assertResults(FileUtils.readFileToString(tmpFile, "UTF-8").getBytes("UTF-8"));
  tmpFile.delete();
}
public void basicUsageTest() throws Exception {
  SolrXMLSerializer serializer = new SolrXMLSerializer();

  SolrXMLDef solrXMLDef = getTestSolrXMLDef(defaultCoreNameKey,
      defaultCoreNameVal, peristentKey, persistentVal, sharedLibKey,
      sharedLibVal, adminPathKey, adminPathVal, shareSchemaKey,
      shareSchemaVal, instanceDirKey, instanceDirVal);

  Writer w = new StringWriter();
  try {
    serializer.persist(w, solrXMLDef);
  } finally {
    w.close();
  }

  assertResults(((StringWriter) w).getBuffer().toString().getBytes("UTF-8"));

  // again with default file
  File tmpFile = _TestUtil.createTempFile("solr.xml", null, TEMP_DIR);

  serializer.persistFile(tmpFile, solrXMLDef);

  assertResults(FileUtils.readFileToString(tmpFile, "UTF-8").getBytes("UTF-8"));
  tmpFile.delete();
}
public void testMergeWarmer() throws Exception {
  Directory dir1 = new MockRAMDirectory();
  // Enroll warmer
  MyWarmer warmer = new MyWarmer();
  IndexWriter writer = new IndexWriter(dir1, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT))
      .setMaxBufferedDocs(2).setMergedSegmentWarmer(warmer));
  writer.setInfoStream(infoStream);

  // create the index
  createIndexNoClose(false, "test", writer);

  // get a reader to put writer into near real-time mode
  IndexReader r1 = writer.getReader();

  ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(2);

  for (int i = 0; i < 10; i++) {
    writer.addDocument(createDocument(i, "test", 4));
  }
  ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync();

  assertTrue(warmer.warmCount > 0);
  final int count = warmer.warmCount;

  writer.addDocument(createDocument(17, "test", 4));
  writer.optimize();
  assertTrue(warmer.warmCount > count);

  writer.close();
  r1.close();
  dir1.close();
}
public void testMergeWarmer() throws Exception {
  Directory dir1 = new MockRAMDirectory();
  // Enroll warmer
  MyWarmer warmer = new MyWarmer();
  IndexWriter writer = new IndexWriter(dir1, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT))
      .setMaxBufferedDocs(2).setMergedSegmentWarmer(warmer));
  writer.setInfoStream(infoStream);

  // create the index
  createIndexNoClose(false, "test", writer);

  // get a reader to put writer into near real-time mode
  IndexReader r1 = writer.getReader();

  ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(2);

  for (int i = 0; i < 100*_TestUtil.getRandomMultiplier(); i++) {
    writer.addDocument(createDocument(i, "test", 4));
  }
  ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync();

  assertTrue(warmer.warmCount > 0);
  final int count = warmer.warmCount;

  writer.addDocument(createDocument(17, "test", 4));
  writer.optimize();
  assertTrue(warmer.warmCount > count);

  writer.close();
  r1.close();
  dir1.close();
}
public void checkpoint(SegmentInfos segmentInfos, boolean isCommit) throws IOException { if (infoStream != null) { message("now checkpoint \"" + segmentInfos.getCurrentSegmentFileName() + "\" [" + segmentInfos.size() + " segments " + "; isCommit = " + isCommit + "]"); } // Try again now to delete any previously un-deletable // files (because they were in use, on Windows): deletePendingFiles(); // Incref the files: incRef(segmentInfos, isCommit); if (isCommit) { // Append to our commits list: commits.add(new CommitPoint(commitsToDelete, directory, segmentInfos)); // Tell policy so it can remove commits: policy.onCommit(commits); // Decref files for commits that were deleted by the policy: deleteCommits(); } else { final List<String> docWriterFiles; if (docWriter != null) { docWriterFiles = docWriter.openFiles(); if (docWriterFiles != null) // We must incRef these files before decRef'ing // last files to make sure we don't accidentally // delete them: incRef(docWriterFiles); } else docWriterFiles = null; // DecRef old files from the last checkpoint, if any: int size = lastFiles.size(); if (size > 0) { for(int i=0;i<size;i++) decRef(lastFiles.get(i)); lastFiles.clear(); } // Save files so we can decr on next checkpoint/commit: lastFiles.add(segmentInfos.files(directory, false)); if (docWriterFiles != null) lastFiles.add(docWriterFiles); } } void incRef(SegmentInfos segmentInfos, boolean isCommit) throws IOException { // If this is a commit point, also incRef the // segments_N file: for( final String fileName: segmentInfos.files(directory, isCommit) ) { incRef(fileName); } } void incRef(List<String> files) throws IOException { for(final String file : files) { incRef(file); } }
public void checkpoint(SegmentInfos segmentInfos, boolean isCommit) throws IOException { if (infoStream != null) { message("now checkpoint \"" + segmentInfos.getCurrentSegmentFileName() + "\" [" + segmentInfos.size() + " segments " + "; isCommit = " + isCommit + "]"); } // Try again now to delete any previously un-deletable // files (because they were in use, on Windows): deletePendingFiles(); // Incref the files: incRef(segmentInfos, isCommit); if (isCommit) { // Append to our commits list: commits.add(new CommitPoint(commitsToDelete, directory, segmentInfos)); // Tell policy so it can remove commits: policy.onCommit(commits); // Decref files for commits that were deleted by the policy: deleteCommits(); } else { final List<String> docWriterFiles; if (docWriter != null) { docWriterFiles = docWriter.openFiles(); if (docWriterFiles != null) // We must incRef these files before decRef'ing // last files to make sure we don't accidentally // delete them: incRef(docWriterFiles); } else docWriterFiles = null; // DecRef old files from the last checkpoint, if any: int size = lastFiles.size(); if (size > 0) { for(int i=0;i<size;i++) decRef(lastFiles.get(i)); lastFiles.clear(); } // Save files so we can decr on next checkpoint/commit: lastFiles.add(segmentInfos.files(directory, false)); if (docWriterFiles != null) lastFiles.add(docWriterFiles); } } void incRef(SegmentInfos segmentInfos, boolean isCommit) throws IOException { // If this is a commit point, also incRef the // segments_N file: for( final String fileName: segmentInfos.files(directory, isCommit) ) { incRef(fileName); } } void incRef(Collection<String> files) throws IOException { for(final String file : files) { incRef(file); } }
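The only change in the checkpoint/incRef pair above is the parameter type of the second incRef overload: List<String> becomes Collection<String>, so any collection of file names can be passed in (presumably matching what segmentInfos.files(...) and docWriter.openFiles() hand back) without an extra copy. A small, self-contained sketch of that design point, with illustrative names rather than Lucene's:

```java
// Accepting the Collection interface lets callers pass a List, a Set, or any other
// concrete collection type; the names below are illustrative only.
import java.util.Collection;
import java.util.List;
import java.util.Set;

final class RefCounter {
  private int refs;

  void incRef(String file) { refs++; }

  // Programming to Collection instead of List widens what callers may pass.
  void incRef(Collection<String> files) {
    for (String file : files) {
      incRef(file);
    }
  }

  public static void main(String[] args) {
    RefCounter rc = new RefCounter();
    rc.incRef(List.of("_0.fdt", "_0.fdx"));
    rc.incRef(Set.of("_0.tvx"));
    System.out.println(rc.refs); // 3
  }
}
```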
public Filter makeFilter(SpatialArgs args) {
  final SpatialOperation op = args.getOperation();
  if (! SpatialOperation.is(op,
      SpatialOperation.IsWithin, SpatialOperation.Intersects,
      SpatialOperation.BBoxWithin, SpatialOperation.BBoxIntersects))
    throw new UnsupportedSpatialOperation(op);

  Shape shape = args.getShape();
  int detailLevel = grid.getLevelForDistance(args.resolveDistErr(ctx, distErrPct));
  List<Node> cells = grid.getNodes(shape, detailLevel, false);

  TermsFilter filter = new TermsFilter();
  for (Node cell : cells) {
    filter.addTerm(new Term(getFieldName(), cell.getTokenString()));
  }
  return filter;
}
public Filter makeFilter(SpatialArgs args) {
  final SpatialOperation op = args.getOperation();
  if (op != SpatialOperation.Intersects)
    throw new UnsupportedSpatialOperation(op);

  Shape shape = args.getShape();
  int detailLevel = grid.getLevelForDistance(args.resolveDistErr(ctx, distErrPct));
  List<Node> cells = grid.getNodes(shape, detailLevel, false);

  TermsFilter filter = new TermsFilter();
  for (Node cell : cells) {
    filter.addTerm(new Term(getFieldName(), cell.getTokenString()));
  }
  return filter;
}
public void testFilterWithVariableScanLevel() throws IOException {
  init(GeohashPrefixTree.getMaxLevelsPossible());
  getAddAndVerifyIndexedDocuments(DATA_WORLD_CITIES_POINTS);

  //execute queries for each prefix grid scan level
  for(int i = 0; i <= maxLength; i++) {
    ((RecursivePrefixTreeStrategy)strategy).setPrefixGridScanLevel(i);
    executeQueries(SpatialMatchConcern.FILTER, QTEST_Cities_IsWithin_BBox);
  }
}
public void testFilterWithVariableScanLevel() throws IOException { init(GeohashPrefixTree.getMaxLevelsPossible()); getAddAndVerifyIndexedDocuments(DATA_WORLD_CITIES_POINTS); //execute queries for each prefix grid scan level for(int i = 0; i <= maxLength; i++) { ((RecursivePrefixTreeStrategy)strategy).setPrefixGridScanLevel(i); executeQueries(SpatialMatchConcern.FILTER, QTEST_Cities_Intersects_BBox); } }
public void testDiskFull() throws IOException { Term searchTerm = new Term("content", "aaa"); int START_COUNT = 157; int END_COUNT = 144; // First build up a starting index: MockDirectoryWrapper startDir = newDirectory(); IndexWriter writer = new IndexWriter(startDir, newIndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT))); for(int i=0;i<157;i++) { Document d = new Document(); d.add(newField("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED)); d.add(newField("content", "aaa " + i, Field.Store.NO, Field.Index.ANALYZED)); writer.addDocument(d); } writer.close(); long diskUsage = startDir.getRecomputedActualSizeInBytes(); long diskFree = diskUsage+100; IOException err = null; boolean done = false; // Iterate w/ ever increasing free disk space: while(!done) { MockDirectoryWrapper dir = new MockDirectoryWrapper(random, new RAMDirectory(startDir)); // If IndexReader hits disk full, it can write to // the same files again. dir.setPreventDoubleWrite(false); IndexReader reader = IndexReader.open(dir, false); // For each disk size, first try to commit against // dir that will hit random IOExceptions & disk // full; after, give it infinite disk space & turn // off random IOExceptions & retry w/ same reader: boolean success = false; for(int x=0;x<2;x++) { double rate = 0.05; double diskRatio = ((double) diskFree)/diskUsage; long thisDiskFree; String testName; if (0 == x) { thisDiskFree = diskFree; if (diskRatio >= 2.0) { rate /= 2; } if (diskRatio >= 4.0) { rate /= 2; } if (diskRatio >= 6.0) { rate = 0.0; } if (VERBOSE) { System.out.println("\ncycle: " + diskFree + " bytes"); } testName = "disk full during reader.close() @ " + thisDiskFree + " bytes"; } else { thisDiskFree = 0; rate = 0.0; if (VERBOSE) { System.out.println("\ncycle: same writer: unlimited disk space"); } testName = "reader re-use after disk full"; } dir.setMaxSizeInBytes(thisDiskFree); dir.setRandomIOExceptionRate(rate); try { if (0 == x) { int docId = 12; for(int i=0;i<13;i++) { reader.deleteDocument(docId); reader.setNorm(docId, "contents", (float) 2.0); docId += 12; } } reader.close(); success = true; if (0 == x) { done = true; } } catch (IOException e) { if (VERBOSE) { System.out.println(" hit IOException: " + e); e.printStackTrace(System.out); } err = e; if (1 == x) { e.printStackTrace(); fail(testName + " hit IOException after disk space was freed up"); } } // Finally, verify index is not corrupt, and, if // we succeeded, we see all docs changed, and if // we failed, we see either all docs or no docs // changed (transactional semantics): IndexReader newReader = null; try { newReader = IndexReader.open(dir, false); } catch (IOException e) { e.printStackTrace(); fail(testName + ":exception when creating IndexReader after disk full during close: " + e); } /* int result = newReader.docFreq(searchTerm); if (success) { if (result != END_COUNT) { fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + END_COUNT); } } else { // On hitting exception we still may have added // all docs: if (result != START_COUNT && result != END_COUNT) { err.printStackTrace(); fail(testName + ": method did throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT + " or " + END_COUNT); } } */ IndexSearcher searcher = newSearcher(newReader); ScoreDoc[] hits = null; try { hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs; } catch (IOException e) { e.printStackTrace(); fail(testName + ": exception 
when searching: " + e); } int result2 = hits.length; if (success) { if (result2 != END_COUNT) { fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + END_COUNT); } } else { // On hitting exception we still may have added // all docs: if (result2 != START_COUNT && result2 != END_COUNT) { err.printStackTrace(); fail(testName + ": method did throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + START_COUNT); } } searcher.close(); newReader.close(); if (result2 == END_COUNT) { break; } } dir.close(); // Try again with 10 more bytes of free space: diskFree += 10; } startDir.close(); }
public void testDiskFull() throws IOException { Term searchTerm = new Term("content", "aaa"); int START_COUNT = 157; int END_COUNT = 144; // First build up a starting index: MockDirectoryWrapper startDir = newDirectory(); IndexWriter writer = new IndexWriter(startDir, newIndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT))); for(int i=0;i<157;i++) { Document d = new Document(); d.add(newField("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED)); d.add(newField("content", "aaa " + i, Field.Store.NO, Field.Index.ANALYZED)); writer.addDocument(d); } writer.close(); long diskUsage = startDir.getRecomputedActualSizeInBytes(); long diskFree = diskUsage+100; IOException err = null; boolean done = false; // Iterate w/ ever increasing free disk space: while(!done) { MockDirectoryWrapper dir = new MockDirectoryWrapper(random, new RAMDirectory(startDir)); // If IndexReader hits disk full, it can write to // the same files again. dir.setPreventDoubleWrite(false); IndexReader reader = IndexReader.open(dir, false); // For each disk size, first try to commit against // dir that will hit random IOExceptions & disk // full; after, give it infinite disk space & turn // off random IOExceptions & retry w/ same reader: boolean success = false; for(int x=0;x<2;x++) { double rate = 0.05; double diskRatio = ((double) diskFree)/diskUsage; long thisDiskFree; String testName; if (0 == x) { thisDiskFree = diskFree; if (diskRatio >= 2.0) { rate /= 2; } if (diskRatio >= 4.0) { rate /= 2; } if (diskRatio >= 6.0) { rate = 0.0; } if (VERBOSE) { System.out.println("\ncycle: " + diskFree + " bytes"); } testName = "disk full during reader.close() @ " + thisDiskFree + " bytes"; } else { thisDiskFree = 0; rate = 0.0; if (VERBOSE) { System.out.println("\ncycle: same writer: unlimited disk space"); } testName = "reader re-use after disk full"; } dir.setMaxSizeInBytes(thisDiskFree); dir.setRandomIOExceptionRate(rate); try { if (0 == x) { int docId = 12; for(int i=0;i<13;i++) { reader.deleteDocument(docId); reader.setNorm(docId, "content", (float) 2.0); docId += 12; } } reader.close(); success = true; if (0 == x) { done = true; } } catch (IOException e) { if (VERBOSE) { System.out.println(" hit IOException: " + e); e.printStackTrace(System.out); } err = e; if (1 == x) { e.printStackTrace(); fail(testName + " hit IOException after disk space was freed up"); } } // Finally, verify index is not corrupt, and, if // we succeeded, we see all docs changed, and if // we failed, we see either all docs or no docs // changed (transactional semantics): IndexReader newReader = null; try { newReader = IndexReader.open(dir, false); } catch (IOException e) { e.printStackTrace(); fail(testName + ":exception when creating IndexReader after disk full during close: " + e); } /* int result = newReader.docFreq(searchTerm); if (success) { if (result != END_COUNT) { fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + END_COUNT); } } else { // On hitting exception we still may have added // all docs: if (result != START_COUNT && result != END_COUNT) { err.printStackTrace(); fail(testName + ": method did throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT + " or " + END_COUNT); } } */ IndexSearcher searcher = newSearcher(newReader); ScoreDoc[] hits = null; try { hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs; } catch (IOException e) { e.printStackTrace(); fail(testName + ": exception 
when searching: " + e); } int result2 = hits.length; if (success) { if (result2 != END_COUNT) { fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + END_COUNT); } } else { // On hitting exception we still may have added // all docs: if (result2 != START_COUNT && result2 != END_COUNT) { err.printStackTrace(); fail(testName + ": method did throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + START_COUNT); } } searcher.close(); newReader.close(); if (result2 == END_COUNT) { break; } } dir.close(); // Try again with 10 more bytes of free space: diskFree += 10; } startDir.close(); }
public void init(NamedList args) { Integer v = (Integer)args.get("setTermIndexInterval"); if (v != null) { termInfosIndexDivisor = v.intValue(); } }
public void init(NamedList args) { Integer v = (Integer)args.get("setTermIndexDivisor"); if (v != null) { termInfosIndexDivisor = v.intValue(); } }
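The fix above changes the lookup key from "setTermIndexInterval" to "setTermIndexDivisor", so the value that was actually configured is the one that gets read. A small generic sketch (plain Map instead of Solr's NamedList; the key name and class are invented) of keeping such a key in one constant so the reader and the documentation cannot drift apart:

import java.util.Map;

class ConfigReader {
  // Single source of truth for the key string.
  static final String TERM_INDEX_DIVISOR_KEY = "termIndexDivisor";

  private int termInfosIndexDivisor = 1;

  void init(Map<String, Object> args) {
    Integer v = (Integer) args.get(TERM_INDEX_DIVISOR_KEY);
    if (v != null) {
      termInfosIndexDivisor = v.intValue();
    }
  }

  public static void main(String[] args) {
    ConfigReader r = new ConfigReader();
    r.init(Map.<String, Object>of(TERM_INDEX_DIVISOR_KEY, 4));
    System.out.println(r.termInfosIndexDivisor); // 4
  }
}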
public void testDeleteFromIndexWriter() throws Exception { boolean optimize = true; Directory dir1 = new MockRAMDirectory(); IndexWriter writer = new IndexWriter(dir1, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())); writer.setInfoStream(infoStream);
public void testDeleteFromIndexWriter() throws Exception { boolean optimize = true; Directory dir1 = new MockRAMDirectory(); IndexWriter writer = new IndexWriter(dir1, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setReaderTermsIndexDivisor(2)); writer.setInfoStream(infoStream);
private void run(Configuration conf, Path input, Path output, int numTopics, int numWords, double topicSmoothing, int maxIterations) throws IOException, InterruptedException, ClassNotFoundException { Path stateIn = new Path(output, "state-0"); writeInitialState(stateIn, numTopics, numWords); double oldLL = Double.NEGATIVE_INFINITY; boolean converged = false; for (int iteration = 1; ((maxIterations < 1) || (iteration <= maxIterations)) && !converged; iteration++) { log.info("Iteration {}", iteration); // point the output to a new directory per iteration Path stateOut = new Path(output, "state-" + iteration); double ll = runIteration(conf, input, stateIn, stateOut, numTopics, numWords, topicSmoothing); double relChange = (oldLL - ll) / oldLL; // now point the input to the old output directory log.info("Iteration {} finished. Log Likelihood: {}", iteration, ll); log.info("(Old LL: {})", oldLL); log.info("(Rel Change: {})", relChange); converged = (iteration > 3) && (relChange < OVERALL_CONVERGENCE); stateIn = stateOut; oldLL = ll; } }
private void run(Configuration conf, Path input, Path output, int numTopics, int numWords, double topicSmoothing, int maxIterations) throws IOException, InterruptedException, ClassNotFoundException { Path stateIn = new Path(output, "state-0"); writeInitialState(stateIn, numTopics, numWords); double oldLL = Double.NEGATIVE_INFINITY; boolean converged = false; for (int iteration = 1; ((maxIterations < 1) || (iteration <= maxIterations)) && !converged; iteration++) { log.info("LDA Iteration {}", iteration); // point the output to a new directory per iteration Path stateOut = new Path(output, "state-" + iteration); double ll = runIteration(conf, input, stateIn, stateOut, numTopics, numWords, topicSmoothing); double relChange = (oldLL - ll) / oldLL; // now point the input to the old output directory log.info("Iteration {} finished. Log Likelihood: {}", iteration, ll); log.info("(Old LL: {})", oldLL); log.info("(Rel Change: {})", relChange); converged = (iteration > 3) && (relChange < OVERALL_CONVERGENCE); stateIn = stateOut; oldLL = ll; } }
public void run() { createSnapshot(indexCommit, numberToKeep, replicationHandler); } }.start(); } void createSnapshot(final IndexCommit indexCommit, int numberToKeep, ReplicationHandler replicationHandler) { LOG.info("Creating backup snapshot..."); NamedList<Object> details = new NamedList<Object>(); details.add("startTime", new Date().toString()); File snapShotDir = null; String directoryName = null; Lock lock = null; try { if(numberToKeep<Integer.MAX_VALUE) { deleteOldBackups(numberToKeep); } SimpleDateFormat fmt = new SimpleDateFormat(DATE_FMT, Locale.ROOT); directoryName = "snapshot." + fmt.format(new Date()); lock = lockFactory.makeLock(directoryName + ".lock"); if (lock.isLocked()) return; snapShotDir = new File(snapDir, directoryName); if (!snapShotDir.mkdir()) { LOG.warn("Unable to create snapshot directory: " + snapShotDir.getAbsolutePath()); return; } Collection<String> files = indexCommit.getFileNames(); FileCopier fileCopier = new FileCopier(); Directory dir = solrCore.getDirectoryFactory().get(solrCore.getNewIndexDir(), DirContext.DEFAULT, solrCore.getSolrConfig().indexConfig.lockType); try { fileCopier.copyFiles(dir, files, snapShotDir); } finally { solrCore.getDirectoryFactory().release(dir); } details.add("fileCount", files.size()); details.add("status", "success"); details.add("snapshotCompletedAt", new Date().toString()); } catch (Exception e) { SnapPuller.delTree(snapShotDir); LOG.error("Exception while creating snapshot", e); details.add("snapShootException", e.getMessage()); } finally { replicationHandler.core.getDeletionPolicy().releaseCommitPoint(indexCommit.getGeneration()); replicationHandler.snapShootDetails = details; if (lock != null) { try { lock.release(); } catch (IOException e) { LOG.error("Unable to release snapshoot lock: " + directoryName + ".lock"); } } } }
public void run() { createSnapshot(indexCommit, numberToKeep, replicationHandler); } }.start(); } void createSnapshot(final IndexCommit indexCommit, int numberToKeep, ReplicationHandler replicationHandler) { LOG.info("Creating backup snapshot..."); NamedList<Object> details = new NamedList<Object>(); details.add("startTime", new Date().toString()); File snapShotDir = null; String directoryName = null; Lock lock = null; try { if(numberToKeep<Integer.MAX_VALUE) { deleteOldBackups(numberToKeep); } SimpleDateFormat fmt = new SimpleDateFormat(DATE_FMT, Locale.ROOT); directoryName = "snapshot." + fmt.format(new Date()); lock = lockFactory.makeLock(directoryName + ".lock"); if (lock.isLocked()) return; snapShotDir = new File(snapDir, directoryName); if (!snapShotDir.mkdir()) { LOG.warn("Unable to create snapshot directory: " + snapShotDir.getAbsolutePath()); return; } Collection<String> files = indexCommit.getFileNames(); FileCopier fileCopier = new FileCopier(); Directory dir = solrCore.getDirectoryFactory().get(solrCore.getIndexDir(), DirContext.DEFAULT, solrCore.getSolrConfig().indexConfig.lockType); try { fileCopier.copyFiles(dir, files, snapShotDir); } finally { solrCore.getDirectoryFactory().release(dir); } details.add("fileCount", files.size()); details.add("status", "success"); details.add("snapshotCompletedAt", new Date().toString()); } catch (Exception e) { SnapPuller.delTree(snapShotDir); LOG.error("Exception while creating snapshot", e); details.add("snapShootException", e.getMessage()); } finally { replicationHandler.core.getDeletionPolicy().releaseCommitPoint(indexCommit.getGeneration()); replicationHandler.snapShootDetails = details; if (lock != null) { try { lock.release(); } catch (IOException e) { LOG.error("Unable to release snapshoot lock: " + directoryName + ".lock"); } } } }
public static BloomFilter getFilter(long numElements, int targetBucketsPerElem) { int maxBucketsPerElement = Math.max(1, BloomCalculations.maxBucketsPerElement(numElements)); int bucketsPerElement = Math.min(targetBucketsPerElem, maxBucketsPerElement); if (bucketsPerElement < targetBucketsPerElem) { logger.warn(String.format("Cannot provide an optimal BloomFilter for %d elements (%d/%d buckets per element).", numElements, bucketsPerElement, targetBucketsPerElem)); } BloomCalculations.BloomSpecification spec = BloomCalculations.computeBloomSpec(bucketsPerElement); logger.debug("Creating bloom filter for {} elements and spec {}", numElements, spec); return new BloomFilter(spec.K, bucketsFor(numElements, spec.bucketsPerElement)); }
public static BloomFilter getFilter(long numElements, int targetBucketsPerElem) { int maxBucketsPerElement = Math.max(1, BloomCalculations.maxBucketsPerElement(numElements)); int bucketsPerElement = Math.min(targetBucketsPerElem, maxBucketsPerElement); if (bucketsPerElement < targetBucketsPerElem) { logger.warn(String.format("Cannot provide an optimal BloomFilter for %d elements (%d/%d buckets per element).", numElements, bucketsPerElement, targetBucketsPerElem)); } BloomCalculations.BloomSpecification spec = BloomCalculations.computeBloomSpec(bucketsPerElement); logger.trace("Creating bloom filter for {} elements and spec {}", numElements, spec); return new BloomFilter(spec.K, bucketsFor(numElements, spec.bucketsPerElement)); }
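The fix above only lowers the log level of the per-filter creation message from debug to trace, since it fires for every bloom filter built. A brief SLF4J sketch of the same parameterized-logging pattern (class and message invented, not Cassandra's code):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class FilterFactory {
  private static final Logger logger = LoggerFactory.getLogger(FilterFactory.class);

  static void create(long numElements, int bucketsPerElement) {
    // The {} placeholders are only formatted when trace is enabled, so no
    // explicit isTraceEnabled() guard is needed for cheap arguments.
    logger.trace("Creating bloom filter for {} elements and {} buckets per element",
                 numElements, bucketsPerElement);
  }
}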
public void testExpirationTimeDeletionPolicy() throws IOException, InterruptedException { final double SECONDS = 2.0; boolean autoCommit = false; boolean useCompoundFile = true; Directory dir = new RAMDirectory(); ExpirationTimeDeletionPolicy policy = new ExpirationTimeDeletionPolicy(dir, SECONDS); IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy); writer.setUseCompoundFile(useCompoundFile); writer.close(); long lastDeleteTime = 0; for(int i=0;i<7;i++) { // Record last time when writer performed deletes of // past commits lastDeleteTime = System.currentTimeMillis(); writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy); writer.setUseCompoundFile(useCompoundFile); for(int j=0;j<17;j++) { addDoc(writer); } writer.close(); // Make sure to sleep long enough so that some commit // points will be deleted: Thread.sleep((int) (1000.0*(SECONDS/5.0))); } // First, make sure the policy in fact deleted something: assertTrue("no commits were deleted", policy.numDelete > 0); // Then simplistic check: just verify that the // segments_N's that still exist are in fact within SECONDS // seconds of the last one's mod time, and, that I can // open a reader on each: long gen = SegmentInfos.getCurrentSegmentGeneration(dir); String fileName = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen); dir.deleteFile(IndexFileNames.SEGMENTS_GEN); while(gen > 0) { try { IndexReader reader = IndexReader.open(dir); reader.close(); fileName = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen); long modTime = dir.fileModified(fileName); assertTrue("commit point was older than " + SECONDS + " seconds but did not get deleted", lastDeleteTime - modTime < (SECONDS*1000)); } catch (IOException e) { // OK break; } dir.deleteFile(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen)); gen--; } dir.close(); }
public void testExpirationTimeDeletionPolicy() throws IOException, InterruptedException { final double SECONDS = 2.0; boolean autoCommit = false; boolean useCompoundFile = true; Directory dir = new RAMDirectory(); ExpirationTimeDeletionPolicy policy = new ExpirationTimeDeletionPolicy(dir, SECONDS); IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy); writer.setUseCompoundFile(useCompoundFile); writer.close(); long lastDeleteTime = 0; for(int i=0;i<7;i++) { // Record last time when writer performed deletes of // past commits lastDeleteTime = System.currentTimeMillis(); writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy); writer.setUseCompoundFile(useCompoundFile); for(int j=0;j<17;j++) { addDoc(writer); } writer.close(); // Make sure to sleep long enough so that some commit // points will be deleted: Thread.sleep((int) (1000.0*(SECONDS/5.0))); } // First, make sure the policy in fact deleted something: assertTrue("no commits were deleted", policy.numDelete > 0); // Then simplistic check: just verify that the // segments_N's that still exist are in fact within SECONDS // seconds of the last one's mod time, and, that I can // open a reader on each: long gen = SegmentInfos.getCurrentSegmentGeneration(dir); String fileName = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen); dir.deleteFile(IndexFileNames.SEGMENTS_GEN); while(gen > 0) { try { IndexReader reader = IndexReader.open(dir); reader.close(); fileName = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen); long modTime = dir.fileModified(fileName); assertTrue("commit point was older than " + SECONDS + " seconds (" + (lastDeleteTime - modTime) + " msec) but did not get deleted", lastDeleteTime - modTime <= (SECONDS*1000)); } catch (IOException e) { // OK break; } dir.deleteFile(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen)); gen--; } dir.close(); }
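Two things change in the fixed test above: the comparison becomes inclusive (<= instead of <) and the assertion message reports the measured gap in milliseconds. A tiny JUnit-style sketch of that pattern (method and values invented, not the Lucene deletion-policy test itself):

import static org.junit.Assert.assertTrue;

class BoundaryCheckExample {
  static void assertWithinWindow(long lastDeleteTime, long modTime, double seconds) {
    long ageMsec = lastDeleteTime - modTime;
    // Inclusive comparison, and the message carries the actual value so a
    // failure report says how far outside the window the commit was.
    assertTrue("commit point was older than " + seconds + " seconds ("
               + ageMsec + " msec) but did not get deleted",
               ageMsec <= (long) (seconds * 1000));
  }

  public static void main(String[] args) {
    long now = System.currentTimeMillis();
    assertWithinWindow(now, now - 1500, 2.0); // passes: 1500 msec <= 2000 msec
  }
}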
public void testPropsDefaults() throws Exception { IndexWriter writer = new ExposeWriterHandler().getWriter(); ConcurrentMergeScheduler cms = (ConcurrentMergeScheduler)writer.getMergeScheduler(); assertEquals(10, cms.getMaxThreadCount()); }
public void testPropsDefaults() throws Exception { IndexWriter writer = new ExposeWriterHandler().getWriter(); ConcurrentMergeScheduler cms = (ConcurrentMergeScheduler)writer.getMergeScheduler(); assertEquals(4, cms.getMaxThreadCount()); }
public TokenStream init(TokenStream tokenStream) { return null; } }); highlighter.setTextFragmenter(new SimpleFragmenter(2000)); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(rawDocContent)); String encodedSnippet = highlighter.getBestFragments(tokenStream, rawDocContent, 1, ""); // An ugly bit of XML creation: String xhtml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" + "<html xmlns=\"http://www.w3.org/1999/xhtml\" xml:lang=\"en\" lang=\"en\">\n" + "<head>\n" + "<title>My Test HTML Document</title>\n" + "</head>\n" + "<body>\n" + "<h2>" + encodedSnippet + "</h2>\n" + "</body>\n" + "</html>"; // now an ugly built of XML parsing to test the snippet is encoded OK DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance(); DocumentBuilder db = dbf.newDocumentBuilder(); org.w3c.dom.Document doc = db.parse(new ByteArrayInputStream(xhtml.getBytes())); Element root = doc.getDocumentElement(); NodeList nodes = root.getElementsByTagName("body"); Element body = (Element) nodes.item(0); nodes = body.getElementsByTagName("h2"); Element h2 = (Element) nodes.item(0); String decodedSnippet = h2.getFirstChild().getNodeValue(); assertEquals("XHTML Encoding should have worked:", rawDocContent, decodedSnippet); }
public TokenStream init(TokenStream tokenStream) { return null; } }); highlighter.setTextFragmenter(new SimpleFragmenter(2000)); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(rawDocContent)); String encodedSnippet = highlighter.getBestFragments(tokenStream, rawDocContent, 1, ""); // An ugly bit of XML creation: String xhtml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" + "<html xmlns=\"http://www.w3.org/1999/xhtml\" xml:lang=\"en\" lang=\"en\">\n" + "<head>\n" + "<title>My Test HTML Document</title>\n" + "</head>\n" + "<body>\n" + "<h2>" + encodedSnippet + "</h2>\n" + "</body>\n" + "</html>"; // now an ugly built of XML parsing to test the snippet is encoded OK DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance(); DocumentBuilder db = dbf.newDocumentBuilder(); org.w3c.dom.Document doc = db.parse(new ByteArrayInputStream(xhtml.getBytes("UTF-8"))); Element root = doc.getDocumentElement(); NodeList nodes = root.getElementsByTagName("body"); Element body = (Element) nodes.item(0); nodes = body.getElementsByTagName("h2"); Element h2 = (Element) nodes.item(0); String decodedSnippet = h2.getFirstChild().getNodeValue(); assertEquals("XHTML Encoding should have worked:", rawDocContent, decodedSnippet); }
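The fix above replaces xhtml.getBytes(), which uses the platform default charset, with getBytes("UTF-8"), matching the encoding declared in the XML prolog. A small standalone sketch of the pitfall, using java.nio.charset.StandardCharsets rather than the string form (content is invented):

import java.nio.charset.StandardCharsets;

class CharsetExample {
  public static void main(String[] args) {
    String xml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?><p>caf\u00e9</p>";

    // Depends on the JVM's default charset: on a non-UTF-8 platform these
    // bytes will not match the encoding declared in the prolog.
    byte[] platformDependent = xml.getBytes();

    // Always UTF-8, regardless of platform defaults.
    byte[] explicit = xml.getBytes(StandardCharsets.UTF_8);

    System.out.println(platformDependent.length + " vs " + explicit.length);
  }
}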
public SSTableSimpleUnsortedWriter(File directory, String keyspace, String columnFamily, AbstractType comparator, AbstractType subComparator, int bufferSizeInMB) throws IOException { super(directory, new CFMetaData(keyspace, columnFamily, subComparator == null ? ColumnFamilyType.Standard : ColumnFamilyType.Super, comparator, subComparator)); this.bufferSize = bufferSizeInMB * 1024 * 1024; }
public SSTableSimpleUnsortedWriter(File directory, String keyspace, String columnFamily, AbstractType comparator, AbstractType subComparator, int bufferSizeInMB) throws IOException { super(directory, new CFMetaData(keyspace, columnFamily, subComparator == null ? ColumnFamilyType.Standard : ColumnFamilyType.Super, comparator, subComparator)); this.bufferSize = bufferSizeInMB * 1024L * 1024L; }
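The fixed constructor writes 1024L so the multiplication is carried out in long arithmetic; in the buggy version bufferSizeInMB * 1024 * 1024 is evaluated as int and overflows once bufferSizeInMB reaches 2048. A minimal standalone demonstration:

class BufferSizeOverflow {
  public static void main(String[] args) {
    int bufferSizeInMB = 4096;

    // int * int * int: wraps around, giving 0 here (4096 * 2^20 == 2^32).
    long wrong = bufferSizeInMB * 1024 * 1024;

    // Promoting the arithmetic to long via long literals keeps the real value.
    long right = bufferSizeInMB * 1024L * 1024L;

    System.out.println(wrong + " vs " + right); // 0 vs 4294967296
  }
}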
public void testEnablingServer() throws Exception { assertTrue(! healthcheckFile.exists()); // first make sure that ping responds back that the service is disabled SolrQueryResponse sqr = makeRequest(handler, req()); SolrException se = (SolrException) sqr.getException(); assertEquals( "Response should have been replaced with a 503 SolrException.", se.code(), SolrException.ErrorCode.SERVICE_UNAVAILABLE.code); // now enable makeRequest(handler, req("action", "enable")); assertTrue(healthcheckFile.exists()); assertNotNull(FileUtils.readFileToString(healthcheckFile), "UTF-8"); // now verify that the handler response with success SolrQueryResponse rsp = makeRequest(handler, req()); assertEquals("OK", rsp.getValues().get("status")); // enable when already enabled shouldn't cause any problems makeRequest(handler, req("action", "enable")); assertTrue(healthcheckFile.exists()); }
public void testEnablingServer() throws Exception { assertTrue(! healthcheckFile.exists()); // first make sure that ping responds back that the service is disabled SolrQueryResponse sqr = makeRequest(handler, req()); SolrException se = (SolrException) sqr.getException(); assertEquals( "Response should have been replaced with a 503 SolrException.", se.code(), SolrException.ErrorCode.SERVICE_UNAVAILABLE.code); // now enable makeRequest(handler, req("action", "enable")); assertTrue(healthcheckFile.exists()); assertNotNull(FileUtils.readFileToString(healthcheckFile, "UTF-8")); // now verify that the handler response with success SolrQueryResponse rsp = makeRequest(handler, req()); assertEquals("OK", rsp.getValues().get("status")); // enable when already enabled shouldn't cause any problems makeRequest(handler, req("action", "enable")); assertTrue(healthcheckFile.exists()); }
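In the buggy test the "UTF-8" string ends up as an argument to assertNotNull rather than to readFileToString, so the file is still read with the platform default charset; the fix moves it inside the read call. A short commons-io sketch of reading and writing with an explicit encoding (the file path is invented):

import java.io.File;
import java.io.IOException;
import org.apache.commons.io.FileUtils;

class ReadWithCharset {
  public static void main(String[] args) throws IOException {
    File healthcheckFile = new File("server-enabled.txt");
    FileUtils.writeStringToFile(healthcheckFile, "enabled", "UTF-8");

    // The encoding belongs inside the read call, not in the assertion around it.
    String contents = FileUtils.readFileToString(healthcheckFile, "UTF-8");
    System.out.println(contents);
  }
}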
private void overwriteStopwords(String stopwords) throws IOException { SolrCore core = h.getCoreContainer().getCore(collection); try { String configDir = core.getResourceLoader().getConfigDir(); FileUtils.moveFile(new File(configDir, "stopwords.txt"), new File(configDir, "stopwords.txt.bak")); File file = new File(configDir, "stopwords.txt"); FileUtils.writeStringToFile(file, stopwords); } finally { core.close(); } }
private void overwriteStopwords(String stopwords) throws IOException { SolrCore core = h.getCoreContainer().getCore(collection); try { String configDir = core.getResourceLoader().getConfigDir(); FileUtils.moveFile(new File(configDir, "stopwords.txt"), new File(configDir, "stopwords.txt.bak")); File file = new File(configDir, "stopwords.txt"); FileUtils.writeStringToFile(file, stopwords, "UTF-8"); } finally { core.close(); } }
private void writeCustomConfig(String coreName, String config, String schema, String rand_snip) throws IOException { File coreRoot = new File(solrHomeDirectory, coreName); File subHome = new File(coreRoot, "conf"); if (!coreRoot.exists()) { assertTrue("Failed to make subdirectory ", coreRoot.mkdirs()); } // Write the file for core discovery FileUtils.writeStringToFile(new File(coreRoot, "core.properties"), "name=" + coreName + System.getProperty("line.separator") + "transient=true" + System.getProperty("line.separator") + "loadOnStartup=true", Charsets.UTF_8.toString()); FileUtils.writeStringToFile(new File(subHome, "solrconfig.snippet.randomindexconfig.xml"), rand_snip); FileUtils.writeStringToFile(new File(subHome, "solrconfig.xml"), config, Charsets.UTF_8.toString()); FileUtils.writeStringToFile(new File(subHome, "schema.xml"), schema, Charsets.UTF_8.toString()); }
private void writeCustomConfig(String coreName, String config, String schema, String rand_snip) throws IOException { File coreRoot = new File(solrHomeDirectory, coreName); File subHome = new File(coreRoot, "conf"); if (!coreRoot.exists()) { assertTrue("Failed to make subdirectory ", coreRoot.mkdirs()); } // Write the file for core discovery FileUtils.writeStringToFile(new File(coreRoot, "core.properties"), "name=" + coreName + System.getProperty("line.separator") + "transient=true" + System.getProperty("line.separator") + "loadOnStartup=true", Charsets.UTF_8.toString()); FileUtils.writeStringToFile(new File(subHome, "solrconfig.snippet.randomindexconfig.xml"), rand_snip, Charsets.UTF_8.toString()); FileUtils.writeStringToFile(new File(subHome, "solrconfig.xml"), config, Charsets.UTF_8.toString()); FileUtils.writeStringToFile(new File(subHome, "schema.xml"), schema, Charsets.UTF_8.toString()); }
public TermStats merge(final MergeState mergeState, final DocsEnum postings, final FixedBitSet visitedDocs) throws IOException { int df = 0; long totTF = 0; IndexOptions indexOptions = mergeState.fieldInfo.getIndexOptions(); if (indexOptions == IndexOptions.DOCS_ONLY) { while(true) { final int doc = postings.nextDoc(); if (doc == DocIdSetIterator.NO_MORE_DOCS) { break; } visitedDocs.set(doc); this.startDoc(doc, 0); this.finishDoc(); df++; } totTF = -1; } else if (indexOptions == IndexOptions.DOCS_AND_FREQS) { while(true) { final int doc = postings.nextDoc(); if (doc == DocIdSetIterator.NO_MORE_DOCS) { break; } visitedDocs.set(doc); final int freq = postings.freq(); this.startDoc(doc, freq); this.finishDoc(); df++; totTF += freq; } } else if (indexOptions == IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) { final DocsAndPositionsEnum postingsEnum = (DocsAndPositionsEnum) postings; while(true) { final int doc = postingsEnum.nextDoc(); if (doc == DocIdSetIterator.NO_MORE_DOCS) { break; } visitedDocs.set(doc); final int freq = postingsEnum.freq(); this.startDoc(doc, freq); totTF += freq; for(int i=0;i<freq;i++) { final int position = postingsEnum.nextPosition(); final BytesRef payload; if (postingsEnum.hasPayload()) { payload = postingsEnum.getPayload(); } else { payload = null; } this.addPosition(position, payload, -1, -1); } this.finishDoc(); df++; } } else { assert indexOptions == IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS; final DocsAndPositionsEnum postingsEnum = (DocsAndPositionsEnum) postings; while(true) { final int doc = postingsEnum.nextDoc(); if (doc == DocIdSetIterator.NO_MORE_DOCS) { break; } visitedDocs.set(doc); final int freq = postingsEnum.freq(); this.startDoc(doc, freq); totTF += freq; for(int i=0;i<freq;i++) { final int position = postingsEnum.nextPosition(); final BytesRef payload; if (postingsEnum.hasPayload()) { payload = postingsEnum.getPayload(); } else { payload = null; } this.addPosition(position, payload, postingsEnum.startOffset(), postingsEnum.endOffset()); } this.finishDoc(); df++; } } return new TermStats(df, totTF); }
public TermStats merge(final MergeState mergeState, final DocsEnum postings, final FixedBitSet visitedDocs) throws IOException { int df = 0; long totTF = 0; IndexOptions indexOptions = mergeState.fieldInfo.getIndexOptions(); if (indexOptions == IndexOptions.DOCS_ONLY) { while(true) { final int doc = postings.nextDoc(); if (doc == DocIdSetIterator.NO_MORE_DOCS) { break; } visitedDocs.set(doc); this.startDoc(doc, 0); this.finishDoc(); df++; } totTF = -1; } else if (indexOptions == IndexOptions.DOCS_AND_FREQS) { while(true) { final int doc = postings.nextDoc(); if (doc == DocIdSetIterator.NO_MORE_DOCS) { break; } visitedDocs.set(doc); final int freq = postings.freq(); this.startDoc(doc, freq); this.finishDoc(); df++; totTF += freq; } } else if (indexOptions == IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) { final DocsAndPositionsEnum postingsEnum = (DocsAndPositionsEnum) postings; while(true) { final int doc = postingsEnum.nextDoc(); if (doc == DocIdSetIterator.NO_MORE_DOCS) { break; } visitedDocs.set(doc); final int freq = postingsEnum.freq(); this.startDoc(doc, freq); totTF += freq; for(int i=0;i<freq;i++) { final int position = postingsEnum.nextPosition(); final BytesRef payload; if (postingsEnum.hasPayload()) { payload = postingsEnum.getPayload(); } else { payload = null; } this.addPosition(position, payload, -1, -1); } this.finishDoc(); df++; } } else { assert indexOptions == IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS; final DocsAndPositionsEnum postingsEnum = (DocsAndPositionsEnum) postings; while(true) { final int doc = postingsEnum.nextDoc(); if (doc == DocIdSetIterator.NO_MORE_DOCS) { break; } visitedDocs.set(doc); final int freq = postingsEnum.freq(); this.startDoc(doc, freq); totTF += freq; for(int i=0;i<freq;i++) { final int position = postingsEnum.nextPosition(); final BytesRef payload; if (postingsEnum.hasPayload()) { payload = postingsEnum.getPayload(); } else { payload = null; } this.addPosition(position, payload, postingsEnum.startOffset(), postingsEnum.endOffset()); } this.finishDoc(); df++; } } return new TermStats(df, indexOptions == IndexOptions.DOCS_ONLY ? -1 : totTF); }
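The fixed version above adds an explicit guard at the return site: for fields indexed as DOCS_ONLY, the total term frequency is reported as the sentinel -1 (frequencies not tracked) rather than whatever the running counter holds. A small invented sketch of that sentinel convention (not Lucene's TermStats):

class TermStatsExample {
  static final long NOT_TRACKED = -1;

  static long totalTermFreq(boolean freqsIndexed, long runningTotal) {
    // When only document IDs are indexed there is no meaningful total term
    // frequency, so the sentinel is returned instead of a partial number.
    return freqsIndexed ? runningTotal : NOT_TRACKED;
  }

  public static void main(String[] args) {
    System.out.println(totalTermFreq(true, 42));   // 42
    System.out.println(totalTermFreq(false, 42));  // -1
  }
}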
public void merge(MergeState mergeState, TermsEnum termsEnum) throws IOException { BytesRef term; assert termsEnum != null; long sumTotalTermFreq = 0; long sumDocFreq = 0; long sumDFsinceLastAbortCheck = 0; FixedBitSet visitedDocs = new FixedBitSet(mergeState.segmentInfo.getDocCount()); IndexOptions indexOptions = mergeState.fieldInfo.getIndexOptions(); if (indexOptions == IndexOptions.DOCS_ONLY) { if (docsEnum == null) { docsEnum = new MappingMultiDocsEnum(); } docsEnum.setMergeState(mergeState); MultiDocsEnum docsEnumIn = null; while((term = termsEnum.next()) != null) { // We can pass null for liveDocs, because the // mapping enum will skip the non-live docs: docsEnumIn = (MultiDocsEnum) termsEnum.docs(null, docsEnumIn, false); if (docsEnumIn != null) { docsEnum.reset(docsEnumIn); final PostingsConsumer postingsConsumer = startTerm(term); final TermStats stats = postingsConsumer.merge(mergeState, docsEnum, visitedDocs); if (stats.docFreq > 0) { finishTerm(term, stats); sumTotalTermFreq += stats.docFreq; sumDFsinceLastAbortCheck += stats.docFreq; sumDocFreq += stats.docFreq; if (sumDFsinceLastAbortCheck > 60000) { mergeState.checkAbort.work(sumDFsinceLastAbortCheck/5.0); sumDFsinceLastAbortCheck = 0; } } } } } else if (indexOptions == IndexOptions.DOCS_AND_FREQS) { if (docsAndFreqsEnum == null) { docsAndFreqsEnum = new MappingMultiDocsEnum(); } docsAndFreqsEnum.setMergeState(mergeState); MultiDocsEnum docsAndFreqsEnumIn = null; while((term = termsEnum.next()) != null) { // We can pass null for liveDocs, because the // mapping enum will skip the non-live docs: docsAndFreqsEnumIn = (MultiDocsEnum) termsEnum.docs(null, docsAndFreqsEnumIn, true); assert docsAndFreqsEnumIn != null; docsAndFreqsEnum.reset(docsAndFreqsEnumIn); final PostingsConsumer postingsConsumer = startTerm(term); final TermStats stats = postingsConsumer.merge(mergeState, docsAndFreqsEnum, visitedDocs); if (stats.docFreq > 0) { finishTerm(term, stats); sumTotalTermFreq += stats.totalTermFreq; sumDFsinceLastAbortCheck += stats.docFreq; sumDocFreq += stats.docFreq; if (sumDFsinceLastAbortCheck > 60000) { mergeState.checkAbort.work(sumDFsinceLastAbortCheck/5.0); sumDFsinceLastAbortCheck = 0; } } } } else if (indexOptions == IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) { if (postingsEnum == null) { postingsEnum = new MappingMultiDocsAndPositionsEnum(); } postingsEnum.setMergeState(mergeState); MultiDocsAndPositionsEnum postingsEnumIn = null; while((term = termsEnum.next()) != null) { // We can pass null for liveDocs, because the // mapping enum will skip the non-live docs: postingsEnumIn = (MultiDocsAndPositionsEnum) termsEnum.docsAndPositions(null, postingsEnumIn, false); assert postingsEnumIn != null; postingsEnum.reset(postingsEnumIn); // set PayloadProcessor if (mergeState.payloadProcessorProvider != null) { for (int i = 0; i < mergeState.readers.size(); i++) { if (mergeState.readerPayloadProcessor[i] != null) { mergeState.currentPayloadProcessor[i] = mergeState.readerPayloadProcessor[i].getProcessor(mergeState.fieldInfo.name, term); } } } final PostingsConsumer postingsConsumer = startTerm(term); final TermStats stats = postingsConsumer.merge(mergeState, postingsEnum, visitedDocs); if (stats.docFreq > 0) { finishTerm(term, stats); sumTotalTermFreq += stats.totalTermFreq; sumDFsinceLastAbortCheck += stats.docFreq; sumDocFreq += stats.docFreq; if (sumDFsinceLastAbortCheck > 60000) { mergeState.checkAbort.work(sumDFsinceLastAbortCheck/5.0); sumDFsinceLastAbortCheck = 0; } } } } else { assert indexOptions == 
IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS; if (postingsEnum == null) { postingsEnum = new MappingMultiDocsAndPositionsEnum(); } postingsEnum.setMergeState(mergeState); MultiDocsAndPositionsEnum postingsEnumIn = null; while((term = termsEnum.next()) != null) { // We can pass null for liveDocs, because the // mapping enum will skip the non-live docs: postingsEnumIn = (MultiDocsAndPositionsEnum) termsEnum.docsAndPositions(null, postingsEnumIn, true); assert postingsEnumIn != null; postingsEnum.reset(postingsEnumIn); // set PayloadProcessor if (mergeState.payloadProcessorProvider != null) { for (int i = 0; i < mergeState.readers.size(); i++) { if (mergeState.readerPayloadProcessor[i] != null) { mergeState.currentPayloadProcessor[i] = mergeState.readerPayloadProcessor[i].getProcessor(mergeState.fieldInfo.name, term); } } } final PostingsConsumer postingsConsumer = startTerm(term); final TermStats stats = postingsConsumer.merge(mergeState, postingsEnum, visitedDocs); if (stats.docFreq > 0) { finishTerm(term, stats); sumTotalTermFreq += stats.totalTermFreq; sumDFsinceLastAbortCheck += stats.docFreq; sumDocFreq += stats.docFreq; if (sumDFsinceLastAbortCheck > 60000) { mergeState.checkAbort.work(sumDFsinceLastAbortCheck/5.0); sumDFsinceLastAbortCheck = 0; } } } } finish(sumTotalTermFreq, sumDocFreq, visitedDocs.cardinality()); }
public void merge(MergeState mergeState, TermsEnum termsEnum) throws IOException { BytesRef term; assert termsEnum != null; long sumTotalTermFreq = 0; long sumDocFreq = 0; long sumDFsinceLastAbortCheck = 0; FixedBitSet visitedDocs = new FixedBitSet(mergeState.segmentInfo.getDocCount()); IndexOptions indexOptions = mergeState.fieldInfo.getIndexOptions(); if (indexOptions == IndexOptions.DOCS_ONLY) { if (docsEnum == null) { docsEnum = new MappingMultiDocsEnum(); } docsEnum.setMergeState(mergeState); MultiDocsEnum docsEnumIn = null; while((term = termsEnum.next()) != null) { // We can pass null for liveDocs, because the // mapping enum will skip the non-live docs: docsEnumIn = (MultiDocsEnum) termsEnum.docs(null, docsEnumIn, false); if (docsEnumIn != null) { docsEnum.reset(docsEnumIn); final PostingsConsumer postingsConsumer = startTerm(term); final TermStats stats = postingsConsumer.merge(mergeState, docsEnum, visitedDocs); if (stats.docFreq > 0) { finishTerm(term, stats); sumTotalTermFreq += stats.docFreq; sumDFsinceLastAbortCheck += stats.docFreq; sumDocFreq += stats.docFreq; if (sumDFsinceLastAbortCheck > 60000) { mergeState.checkAbort.work(sumDFsinceLastAbortCheck/5.0); sumDFsinceLastAbortCheck = 0; } } } } } else if (indexOptions == IndexOptions.DOCS_AND_FREQS) { if (docsAndFreqsEnum == null) { docsAndFreqsEnum = new MappingMultiDocsEnum(); } docsAndFreqsEnum.setMergeState(mergeState); MultiDocsEnum docsAndFreqsEnumIn = null; while((term = termsEnum.next()) != null) { // We can pass null for liveDocs, because the // mapping enum will skip the non-live docs: docsAndFreqsEnumIn = (MultiDocsEnum) termsEnum.docs(null, docsAndFreqsEnumIn, true); assert docsAndFreqsEnumIn != null; docsAndFreqsEnum.reset(docsAndFreqsEnumIn); final PostingsConsumer postingsConsumer = startTerm(term); final TermStats stats = postingsConsumer.merge(mergeState, docsAndFreqsEnum, visitedDocs); if (stats.docFreq > 0) { finishTerm(term, stats); sumTotalTermFreq += stats.totalTermFreq; sumDFsinceLastAbortCheck += stats.docFreq; sumDocFreq += stats.docFreq; if (sumDFsinceLastAbortCheck > 60000) { mergeState.checkAbort.work(sumDFsinceLastAbortCheck/5.0); sumDFsinceLastAbortCheck = 0; } } } } else if (indexOptions == IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) { if (postingsEnum == null) { postingsEnum = new MappingMultiDocsAndPositionsEnum(); } postingsEnum.setMergeState(mergeState); MultiDocsAndPositionsEnum postingsEnumIn = null; while((term = termsEnum.next()) != null) { // We can pass null for liveDocs, because the // mapping enum will skip the non-live docs: postingsEnumIn = (MultiDocsAndPositionsEnum) termsEnum.docsAndPositions(null, postingsEnumIn, false); assert postingsEnumIn != null; postingsEnum.reset(postingsEnumIn); // set PayloadProcessor if (mergeState.payloadProcessorProvider != null) { for (int i = 0; i < mergeState.readers.size(); i++) { if (mergeState.readerPayloadProcessor[i] != null) { mergeState.currentPayloadProcessor[i] = mergeState.readerPayloadProcessor[i].getProcessor(mergeState.fieldInfo.name, term); } } } final PostingsConsumer postingsConsumer = startTerm(term); final TermStats stats = postingsConsumer.merge(mergeState, postingsEnum, visitedDocs); if (stats.docFreq > 0) { finishTerm(term, stats); sumTotalTermFreq += stats.totalTermFreq; sumDFsinceLastAbortCheck += stats.docFreq; sumDocFreq += stats.docFreq; if (sumDFsinceLastAbortCheck > 60000) { mergeState.checkAbort.work(sumDFsinceLastAbortCheck/5.0); sumDFsinceLastAbortCheck = 0; } } } } else { assert indexOptions == 
IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS; if (postingsEnum == null) { postingsEnum = new MappingMultiDocsAndPositionsEnum(); } postingsEnum.setMergeState(mergeState); MultiDocsAndPositionsEnum postingsEnumIn = null; while((term = termsEnum.next()) != null) { // We can pass null for liveDocs, because the // mapping enum will skip the non-live docs: postingsEnumIn = (MultiDocsAndPositionsEnum) termsEnum.docsAndPositions(null, postingsEnumIn, true); assert postingsEnumIn != null; postingsEnum.reset(postingsEnumIn); // set PayloadProcessor if (mergeState.payloadProcessorProvider != null) { for (int i = 0; i < mergeState.readers.size(); i++) { if (mergeState.readerPayloadProcessor[i] != null) { mergeState.currentPayloadProcessor[i] = mergeState.readerPayloadProcessor[i].getProcessor(mergeState.fieldInfo.name, term); } } } final PostingsConsumer postingsConsumer = startTerm(term); final TermStats stats = postingsConsumer.merge(mergeState, postingsEnum, visitedDocs); if (stats.docFreq > 0) { finishTerm(term, stats); sumTotalTermFreq += stats.totalTermFreq; sumDFsinceLastAbortCheck += stats.docFreq; sumDocFreq += stats.docFreq; if (sumDFsinceLastAbortCheck > 60000) { mergeState.checkAbort.work(sumDFsinceLastAbortCheck/5.0); sumDFsinceLastAbortCheck = 0; } } } } finish(indexOptions == IndexOptions.DOCS_ONLY ? -1 : sumTotalTermFreq, sumDocFreq, visitedDocs.cardinality()); }
public static final CharArraySet DEFAULT_ARTICLES = CharArraySet.unmodifiableSet( new CharArraySet(Version.LUCENE_CURRENT, Arrays.asList( "l", "m", "t", "qu", "n", "s", "j"), true));
public static final CharArraySet DEFAULT_ARTICLES = CharArraySet.unmodifiableSet( new CharArraySet(Version.LUCENE_CURRENT, Arrays.asList( "l", "m", "t", "qu", "n", "s", "j", "d", "c", "jusqu", "quoiqu", "lorsqu", "puisqu"), true));
public static void beforeTest() throws Exception { initCore(EXAMPLE_CONFIG, EXAMPLE_SCHEMA); }
public static void beforeTest() throws Exception { initCore(EXAMPLE_CONFIG, EXAMPLE_SCHEMA, EXAMPLE_HOME); }
public void run() { try { //wait on messaging service to start listening MessagingService.instance.waitUntilListening(); synchronized( Gossiper.instance ) { /* Update the local heartbeat counter. */ endPointStateMap_.get(localEndPoint_).getHeartBeatState().updateHeartBeat(); List<GossipDigest> gDigests = new ArrayList<GossipDigest>(); Gossiper.instance.makeRandomGossipDigest(gDigests); if ( gDigests.size() > 0 ) { Message message = makeGossipDigestSynMessage(gDigests); /* Gossip to some random live member */ boolean gossipedToSeed = doGossipToLiveMember(message); /* Gossip to some unreachable member with some probability to check if he is back up */ doGossipToUnreachableMember(message); /* Gossip to a seed if we did not do so above, or we have seen less nodes than there are seeds. This prevents partitions where each group of nodes is only gossiping to a subset of the seeds. The most straightforward check would be to check that all the seeds have been verified either as live or unreachable. To avoid that computation each round, we reason that: either all the live nodes are seeds, in which case non-seeds that come online will introduce themselves to a member of the ring by definition, or there is at least one non-seed node in the list, in which case eventually someone will gossip to it, and then do a gossip to a random seed from the gossipedToSeed check. See CASSANDRA-150 for more exposition. */ if (!gossipedToSeed || liveEndpoints_.size() < seeds_.size()) doGossipToSeed(message); if (logger_.isTraceEnabled()) logger_.trace("Performing status check ..."); doStatusCheck(); } } } catch (Exception e) { throw new RuntimeException(e); } }
public void run() { try { //wait on messaging service to start listening MessagingService.instance.waitUntilListening(); synchronized( Gossiper.instance ) { /* Update the local heartbeat counter. */ endPointStateMap_.get(localEndPoint_).getHeartBeatState().updateHeartBeat(); List<GossipDigest> gDigests = new ArrayList<GossipDigest>(); Gossiper.instance.makeRandomGossipDigest(gDigests); if ( gDigests.size() > 0 ) { Message message = makeGossipDigestSynMessage(gDigests); /* Gossip to some random live member */ boolean gossipedToSeed = doGossipToLiveMember(message); /* Gossip to some unreachable member with some probability to check if he is back up */ doGossipToUnreachableMember(message); /* Gossip to a seed if we did not do so above, or we have seen less nodes than there are seeds. This prevents partitions where each group of nodes is only gossiping to a subset of the seeds. The most straightforward check would be to check that all the seeds have been verified either as live or unreachable. To avoid that computation each round, we reason that: either all the live nodes are seeds, in which case non-seeds that come online will introduce themselves to a member of the ring by definition, or there is at least one non-seed node in the list, in which case eventually someone will gossip to it, and then do a gossip to a random seed from the gossipedToSeed check. See CASSANDRA-150 for more exposition. */ if (!gossipedToSeed || liveEndpoints_.size() < seeds_.size()) doGossipToSeed(message); if (logger_.isTraceEnabled()) logger_.trace("Performing status check ..."); doStatusCheck(); } } } catch (Exception e) { logger_.error("Gossip error", e); } }
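The fix above changes only the catch block: instead of rethrowing as a RuntimeException, which would kill the periodic gossip task, the exception is logged and the next round still runs. A small generic sketch of that pattern for a scheduled task (names are invented placeholders, not Cassandra's Gossiper):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

class PeriodicTaskExample {
  public static void main(String[] args) {
    ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    scheduler.scheduleAtFixedRate(() -> {
      try {
        doOneRound();
      } catch (Exception e) {
        // Catch and log: an exception thrown out of the Runnable would
        // suppress all future executions of this fixed-rate task.
        System.err.println("Gossip error: " + e);
      }
    }, 0, 1, TimeUnit.SECONDS);
  }

  static void doOneRound() {
    // placeholder for the real per-round work
  }
}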
private final void invertDocument(Document doc) throws IOException { Enumeration fields = doc.fields(); while (fields.hasMoreElements()) { Fieldable field = (Fieldable) fields.nextElement(); String fieldName = field.name(); int fieldNumber = fieldInfos.fieldNumber(fieldName); int length = fieldLengths[fieldNumber]; // length of field int position = fieldPositions[fieldNumber]; // position in field if (length>0) position+=analyzer.getPositionIncrementGap(fieldName); int offset = fieldOffsets[fieldNumber]; // offset field if (field.isIndexed()) { if (!field.isTokenized()) { // un-tokenized field String stringValue = field.stringValue(); if(field.isStoreOffsetWithTermVector()) addPosition(fieldName, stringValue, position++, new TermVectorOffsetInfo(offset, offset + stringValue.length())); else addPosition(fieldName, stringValue, position++, null); offset += stringValue.length(); length++; } else { Reader reader; // find or make Reader if (field.readerValue() != null) reader = field.readerValue(); else if (field.stringValue() != null) reader = new StringReader(field.stringValue()); else throw new IllegalArgumentException ("field must have either String or Reader value"); // Tokenize field and add to postingTable TokenStream stream = analyzer.tokenStream(fieldName, reader); try { Token lastToken = null; for (Token t = stream.next(); t != null; t = stream.next()) { position += (t.getPositionIncrement() - 1); if(field.isStoreOffsetWithTermVector()) addPosition(fieldName, t.termText(), position++, new TermVectorOffsetInfo(offset + t.startOffset(), offset + t.endOffset())); else addPosition(fieldName, t.termText(), position++, null); lastToken = t; if (++length > maxFieldLength) { if (infoStream != null) infoStream.println("maxFieldLength " +maxFieldLength+ " reached, ignoring following tokens"); break; } } if(lastToken != null) offset += lastToken.endOffset() + 1; } finally { stream.close(); } } fieldLengths[fieldNumber] = length; // save field length fieldPositions[fieldNumber] = position; // save field position fieldBoosts[fieldNumber] *= field.getBoost(); fieldOffsets[fieldNumber] = offset; } } }
private final void invertDocument(Document doc) throws IOException { Enumeration fields = doc.fields(); while (fields.hasMoreElements()) { Fieldable field = (Fieldable) fields.nextElement(); String fieldName = field.name(); int fieldNumber = fieldInfos.fieldNumber(fieldName); int length = fieldLengths[fieldNumber]; // length of field int position = fieldPositions[fieldNumber]; // position in field if (length>0) position+=analyzer.getPositionIncrementGap(fieldName); int offset = fieldOffsets[fieldNumber]; // offset field if (field.isIndexed()) { if (!field.isTokenized()) { // un-tokenized field String stringValue = field.stringValue(); if(field.isStoreOffsetWithTermVector()) addPosition(fieldName, stringValue, position++, new TermVectorOffsetInfo(offset, offset + stringValue.length())); else addPosition(fieldName, stringValue, position++, null); offset += stringValue.length(); length++; } else { Reader reader; // find or make Reader if (field.readerValue() != null) reader = field.readerValue(); else if (field.stringValue() != null) reader = new StringReader(field.stringValue()); else throw new IllegalArgumentException ("field must have either String or Reader value"); // Tokenize field and add to postingTable TokenStream stream = analyzer.tokenStream(fieldName, reader); try { Token lastToken = null; for (Token t = stream.next(); t != null; t = stream.next()) { position += (t.getPositionIncrement() - 1); if(field.isStoreOffsetWithTermVector()) addPosition(fieldName, t.termText(), position++, new TermVectorOffsetInfo(offset + t.startOffset(), offset + t.endOffset())); else addPosition(fieldName, t.termText(), position++, null); lastToken = t; if (++length >= maxFieldLength) { if (infoStream != null) infoStream.println("maxFieldLength " +maxFieldLength+ " reached, ignoring following tokens"); break; } } if(lastToken != null) offset += lastToken.endOffset() + 1; } finally { stream.close(); } } fieldLengths[fieldNumber] = length; // save field length fieldPositions[fieldNumber] = position; // save field position fieldBoosts[fieldNumber] *= field.getBoost(); fieldOffsets[fieldNumber] = offset; } } }
public void run() { logDroppedMessages(); } }; Timer timer = new Timer("DroppedMessagesLogger"); timer.schedule(logDropped, LOG_DROPPED_INTERVAL_IN_MS, LOG_DROPPED_INTERVAL_IN_MS); MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); try { mbs.registerMBean(this, new ObjectName("org.apache.cassandra.concurrent:type=MESSAGING-SERVICE-POOL")); } catch (Exception e) { throw new RuntimeException(e); } }
public void run() { logDroppedMessages(); } }; Timer timer = new Timer("DroppedMessagesLogger"); timer.schedule(logDropped, LOG_DROPPED_INTERVAL_IN_MS, LOG_DROPPED_INTERVAL_IN_MS); MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); try { mbs.registerMBean(this, new ObjectName("org.apache.cassandra.net:type=MessagingService")); } catch (Exception e) { throw new RuntimeException(e); } }
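The fix above only changes the ObjectName under which the service registers itself, moving it from the org.apache.cassandra.concurrent domain to org.apache.cassandra.net:type=MessagingService. A minimal standalone JMX registration sketch (the MBean interface and the domain string here are invented):

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class JmxRegistrationExample {
  public interface ExampleMBean {
    int getDroppedMessages();
  }

  public static class Example implements ExampleMBean {
    public int getDroppedMessages() { return 0; }
  }

  public static void main(String[] args) throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    // The ObjectName string is "<domain>:type=<Type>" and must match what
    // monitoring tools expect to find.
    mbs.registerMBean(new Example(), new ObjectName("org.example.net:type=MessagingService"));
    System.out.println("registered");
  }
}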
public static final long KEEPALIVE = 60; // seconds to keep "extra" threads alive for when idle static { stages.put(Stage.MUTATION, multiThreadedConfigurableStage(Stage.MUTATION, getConcurrentWriters())); stages.put(Stage.READ, multiThreadedConfigurableStage(Stage.READ, getConcurrentReaders())); stages.put(Stage.REQUEST_RESPONSE, multiThreadedStage(Stage.REQUEST_RESPONSE, Math.max(2, Runtime.getRuntime().availableProcessors()))); stages.put(Stage.INTERNAL_RESPONSE, multiThreadedStage(Stage.INTERNAL_RESPONSE, Math.max(2, Runtime.getRuntime().availableProcessors()))); // the rest are all single-threaded stages.put(Stage.STREAM, new JMXEnabledThreadPoolExecutor(Stage.STREAM)); stages.put(Stage.GOSSIP, new JMXEnabledThreadPoolExecutor(Stage.GOSSIP)); stages.put(Stage.ANTIENTROPY, new JMXEnabledThreadPoolExecutor(Stage.ANTIENTROPY)); stages.put(Stage.MIGRATION, new JMXEnabledThreadPoolExecutor(Stage.MIGRATION)); stages.put(Stage.MISC, new JMXEnabledThreadPoolExecutor(Stage.MISC)); }
public static final long KEEPALIVE = 60; // seconds to keep "extra" threads alive for when idle static { stages.put(Stage.MUTATION, multiThreadedConfigurableStage(Stage.MUTATION, getConcurrentWriters())); stages.put(Stage.READ, multiThreadedConfigurableStage(Stage.READ, getConcurrentReaders())); stages.put(Stage.REQUEST_RESPONSE, multiThreadedStage(Stage.REQUEST_RESPONSE, Math.max(2, Runtime.getRuntime().availableProcessors()))); stages.put(Stage.INTERNAL_RESPONSE, multiThreadedStage(Stage.INTERNAL_RESPONSE, Math.max(2, Runtime.getRuntime().availableProcessors()))); // the rest are all single-threaded stages.put(Stage.STREAM, new JMXEnabledThreadPoolExecutor(Stage.STREAM)); stages.put(Stage.GOSSIP, new JMXEnabledThreadPoolExecutor(Stage.GOSSIP)); stages.put(Stage.ANTI_ENTROPY, new JMXEnabledThreadPoolExecutor(Stage.ANTI_ENTROPY)); stages.put(Stage.MIGRATION, new JMXEnabledThreadPoolExecutor(Stage.MIGRATION)); stages.put(Stage.MISC, new JMXEnabledThreadPoolExecutor(Stage.MISC)); }
public synchronized void start(BundleContext context, final String consumerHeaderName) throws Exception { bundleContext = context; logServiceTracker = new LogServiceTracker(context); logServiceTracker.open(); providerBundleTracker = new BundleTracker(context, Bundle.ACTIVE, new ProviderBundleTrackerCustomizer(this, context.getBundle())); providerBundleTracker.open(); consumerBundleTracker = new BundleTracker(context, Bundle.INSTALLED, new ConsumerBundleTrackerCustomizer(this, consumerHeaderName)); consumerBundleTracker.open(); for (Bundle bundle : context.getBundles()) { addConsumerWeavingData(bundle, consumerHeaderName); } activator = this; }
public synchronized void start(BundleContext context, final String consumerHeaderName) throws Exception { bundleContext = context; logServiceTracker = new LogServiceTracker(context); logServiceTracker.open(); providerBundleTracker = new BundleTracker(context, Bundle.ACTIVE, new ProviderBundleTrackerCustomizer(this, context.getBundle())); providerBundleTracker.open(); consumerBundleTracker = new BundleTracker(context, Bundle.INSTALLED | Bundle.RESOLVED | Bundle.STARTING | Bundle.ACTIVE, new ConsumerBundleTrackerCustomizer(this, consumerHeaderName)); consumerBundleTracker.open(); for (Bundle bundle : context.getBundles()) { addConsumerWeavingData(bundle, consumerHeaderName); } activator = this; }
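The fix widens the BundleTracker state mask from Bundle.INSTALLED alone to the bitwise OR of INSTALLED, RESOLVED, STARTING and ACTIVE, so bundles already past the INSTALLED state are still tracked. Such state constants are single-bit flags that combine with |; a tiny generic sketch of the pattern (flag values invented, not the actual OSGi constants):

class StateMaskExample {
  static final int INSTALLED = 1 << 0;
  static final int RESOLVED  = 1 << 1;
  static final int STARTING  = 1 << 2;
  static final int ACTIVE    = 1 << 3;

  static boolean matches(int stateMask, int bundleState) {
    // A tracker-style check: the bundle is of interest if its state bit is in the mask.
    return (stateMask & bundleState) != 0;
  }

  public static void main(String[] args) {
    int narrowMask = INSTALLED;
    int wideMask = INSTALLED | RESOLVED | STARTING | ACTIVE;
    System.out.println(matches(narrowMask, ACTIVE)); // false: active bundles are missed
    System.out.println(matches(wideMask, ACTIVE));   // true
  }
}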
public static void main(String[] args) { try { Directory directory = new RAMDirectory(); Analyzer analyzer = new SimpleAnalyzer(); IndexWriter writer = new IndexWriter(directory, analyzer, true); String[] docs = { "a b c d e", "a b c d e a b c d e", "a b c d e f g h i j", "a c e", "e c a", "a c e a c e", "a c e a b c" }; for (int j = 0; j < docs.length; j++) { Document d = new Document(); d.add(Field.Text("contents", docs[j])); writer.addDocument(d); } writer.close(); Searcher searcher = new IndexSearcher(directory); String[] queries = { // "a b", // "\"a b\"", // "\"a b c\"", // "a c", // "\"a c\"", "\"a c e\"", }; Hits hits = null; QueryParser parser = new QueryParser("contents", analyzer); parser.setPhraseSlop(4); for (int j = 0; j < queries.length; j++) { Query query = parser.parse(queries[j]); System.out.println("Query: " + query.toString("contents")); //DateFilter filter = // new DateFilter("modified", Time(1997,0,1), Time(1998,0,1)); //DateFilter filter = DateFilter.Before("modified", Time(1997,00,01)); //System.out.println(filter); hits = searcher.search(query, null); System.out.println(hits.length() + " total results"); for (int i = 0 ; i < hits.length() && i < 10; i++) { Document d = hits.doc(i); System.out.println(i + " " + hits.score(i) // + " " + DateField.stringToDate(d.get("modified")) + " " + d.get("contents")); } } searcher.close(); } catch (Exception e) { System.out.println(" caught a " + e.getClass() + "\n with message: " + e.getMessage()); } }
public static void main(String[] args) { try { Directory directory = new RAMDirectory(); Analyzer analyzer = new SimpleAnalyzer(); IndexWriter writer = new IndexWriter(directory, analyzer, true); String[] docs = { "a b c d e", "a b c d e a b c d e", "a b c d e f g h i j", "a c e", "e c a", "a c e a c e", "a c e a b c" }; for (int j = 0; j < docs.length; j++) { Document d = new Document(); d.add(Field.Text("contents", docs[j])); writer.addDocument(d); } writer.close(); Searcher searcher = new IndexSearcher(directory); String[] queries = { // "a b", // "\"a b\"", // "\"a b c\"", // "a c", // "\"a c\"", "\"a c e\"", }; Hits hits = null; QueryParser parser = new QueryParser("contents", analyzer); parser.setPhraseSlop(4); for (int j = 0; j < queries.length; j++) { Query query = parser.parse(queries[j]); System.out.println("Query: " + query.toString("contents")); //DateFilter filter = // new DateFilter("modified", Time(1997,0,1), Time(1998,0,1)); //DateFilter filter = DateFilter.Before("modified", Time(1997,00,01)); //System.out.println(filter); hits = searcher.search(query); System.out.println(hits.length() + " total results"); for (int i = 0 ; i < hits.length() && i < 10; i++) { Document d = hits.doc(i); System.out.println(i + " " + hits.score(i) // + " " + DateField.stringToDate(d.get("modified")) + " " + d.get("contents")); } } searcher.close(); } catch (Exception e) { System.out.println(" caught a " + e.getClass() + "\n with message: " + e.getMessage()); } }
public static void test() throws Exception { File file = new File("words.txt"); System.out.println(" reading word file containing " + file.length() + " bytes"); Date start = new Date(); Vector keys = new Vector(); FileInputStream ws = new FileInputStream(file); BufferedReader wr = new BufferedReader(new InputStreamReader(ws)); for (String key = wr.readLine(); key!=null; key = wr.readLine()) keys.addElement(new Term("word", key)); wr.close(); Date end = new Date(); System.out.print(end.getTime() - start.getTime()); System.out.println(" milliseconds to read " + keys.size() + " words"); start = new Date(); Random gen = new Random(1251971); long fp = (gen.nextInt() & 0xF) + 1; long pp = (gen.nextInt() & 0xF) + 1; int[] docFreqs = new int[keys.size()]; long[] freqPointers = new long[keys.size()]; long[] proxPointers = new long[keys.size()]; for (int i = 0; i < keys.size(); i++) { docFreqs[i] = (gen.nextInt() & 0xF) + 1; freqPointers[i] = fp; proxPointers[i] = pp; fp += (gen.nextInt() & 0xF) + 1;; pp += (gen.nextInt() & 0xF) + 1;; } end = new Date(); System.out.print(end.getTime() - start.getTime()); System.out.println(" milliseconds to generate values"); start = new Date(); Directory store = new FSDirectory("test.store", true); FieldInfos fis = new FieldInfos(); TermInfosWriter writer = new TermInfosWriter(store, "words", fis); fis.add("word", false); for (int i = 0; i < keys.size(); i++) writer.add((Term)keys.elementAt(i), new TermInfo(docFreqs[i], freqPointers[i], proxPointers[i])); writer.close(); end = new Date(); System.out.print(end.getTime() - start.getTime()); System.out.println(" milliseconds to write table"); System.out.println(" table occupies " + store.fileLength("words.tis") + " bytes"); start = new Date(); TermInfosReader reader = new TermInfosReader(store, "words", fis); end = new Date(); System.out.print(end.getTime() - start.getTime()); System.out.println(" milliseconds to open table"); start = new Date(); SegmentTermEnum enum = (SegmentTermEnum)reader.terms(); for (int i = 0; i < keys.size(); i++) { enum.next(); Term key = (Term)keys.elementAt(i); if (!key.equals(enum.term())) throw new Exception("wrong term: " + enum.term() + ", expected: " + key + " at " + i); TermInfo ti = enum.termInfo(); if (ti.docFreq != docFreqs[i]) throw new Exception("wrong value: " + Long.toString(ti.docFreq, 16) + ", expected: " + Long.toString(docFreqs[i], 16) + " at " + i); if (ti.freqPointer != freqPointers[i]) throw new Exception("wrong value: " + Long.toString(ti.freqPointer, 16) + ", expected: " + Long.toString(freqPointers[i], 16) + " at " + i); if (ti.proxPointer != proxPointers[i]) throw new Exception("wrong value: " + Long.toString(ti.proxPointer, 16) + ", expected: " + Long.toString(proxPointers[i], 16) + " at " + i); } end = new Date(); System.out.print(end.getTime() - start.getTime()); System.out.println(" milliseconds to iterate over " + keys.size() + " words"); start = new Date(); for (int i = 0; i < keys.size(); i++) { Term key = (Term)keys.elementAt(i); TermInfo ti = reader.get(key); if (ti.docFreq != docFreqs[i]) throw new Exception("wrong value: " + Long.toString(ti.docFreq, 16) + ", expected: " + Long.toString(docFreqs[i], 16) + " at " + i); if (ti.freqPointer != freqPointers[i]) throw new Exception("wrong value: " + Long.toString(ti.freqPointer, 16) + ", expected: " + Long.toString(freqPointers[i], 16) + " at " + i); if (ti.proxPointer != proxPointers[i]) throw new Exception("wrong value: " + Long.toString(ti.proxPointer, 16) + ", expected: " + Long.toString(proxPointers[i], 16) 
+ " at " + i); } end = new Date(); System.out.print((end.getTime() - start.getTime()) / (float)keys.size()); System.out.println(" average milliseconds per lookup"); TermEnum e = reader.terms(new Term("word", "azz")); System.out.println("Word after azz is " + e.term().text); reader.close(); store.close(); }
public static void test() throws Exception { File file = new File("words.txt"); System.out.println(" reading word file containing " + file.length() + " bytes"); Date start = new Date(); Vector keys = new Vector(); FileInputStream ws = new FileInputStream(file); BufferedReader wr = new BufferedReader(new InputStreamReader(ws)); for (String key = wr.readLine(); key!=null; key = wr.readLine()) keys.addElement(new Term("word", key)); wr.close(); Date end = new Date(); System.out.print(end.getTime() - start.getTime()); System.out.println(" milliseconds to read " + keys.size() + " words"); start = new Date(); Random gen = new Random(1251971); long fp = (gen.nextInt() & 0xF) + 1; long pp = (gen.nextInt() & 0xF) + 1; int[] docFreqs = new int[keys.size()]; long[] freqPointers = new long[keys.size()]; long[] proxPointers = new long[keys.size()]; for (int i = 0; i < keys.size(); i++) { docFreqs[i] = (gen.nextInt() & 0xF) + 1; freqPointers[i] = fp; proxPointers[i] = pp; fp += (gen.nextInt() & 0xF) + 1;; pp += (gen.nextInt() & 0xF) + 1;; } end = new Date(); System.out.print(end.getTime() - start.getTime()); System.out.println(" milliseconds to generate values"); start = new Date(); Directory store = FSDirectory.getDirectory("test.store", true); FieldInfos fis = new FieldInfos(); TermInfosWriter writer = new TermInfosWriter(store, "words", fis); fis.add("word", false); for (int i = 0; i < keys.size(); i++) writer.add((Term)keys.elementAt(i), new TermInfo(docFreqs[i], freqPointers[i], proxPointers[i])); writer.close(); end = new Date(); System.out.print(end.getTime() - start.getTime()); System.out.println(" milliseconds to write table"); System.out.println(" table occupies " + store.fileLength("words.tis") + " bytes"); start = new Date(); TermInfosReader reader = new TermInfosReader(store, "words", fis); end = new Date(); System.out.print(end.getTime() - start.getTime()); System.out.println(" milliseconds to open table"); start = new Date(); SegmentTermEnum enum = (SegmentTermEnum)reader.terms(); for (int i = 0; i < keys.size(); i++) { enum.next(); Term key = (Term)keys.elementAt(i); if (!key.equals(enum.term())) throw new Exception("wrong term: " + enum.term() + ", expected: " + key + " at " + i); TermInfo ti = enum.termInfo(); if (ti.docFreq != docFreqs[i]) throw new Exception("wrong value: " + Long.toString(ti.docFreq, 16) + ", expected: " + Long.toString(docFreqs[i], 16) + " at " + i); if (ti.freqPointer != freqPointers[i]) throw new Exception("wrong value: " + Long.toString(ti.freqPointer, 16) + ", expected: " + Long.toString(freqPointers[i], 16) + " at " + i); if (ti.proxPointer != proxPointers[i]) throw new Exception("wrong value: " + Long.toString(ti.proxPointer, 16) + ", expected: " + Long.toString(proxPointers[i], 16) + " at " + i); } end = new Date(); System.out.print(end.getTime() - start.getTime()); System.out.println(" milliseconds to iterate over " + keys.size() + " words"); start = new Date(); for (int i = 0; i < keys.size(); i++) { Term key = (Term)keys.elementAt(i); TermInfo ti = reader.get(key); if (ti.docFreq != docFreqs[i]) throw new Exception("wrong value: " + Long.toString(ti.docFreq, 16) + ", expected: " + Long.toString(docFreqs[i], 16) + " at " + i); if (ti.freqPointer != freqPointers[i]) throw new Exception("wrong value: " + Long.toString(ti.freqPointer, 16) + ", expected: " + Long.toString(freqPointers[i], 16) + " at " + i); if (ti.proxPointer != proxPointers[i]) throw new Exception("wrong value: " + Long.toString(ti.proxPointer, 16) + ", expected: " + 
Long.toString(proxPointers[i], 16) + " at " + i); } end = new Date(); System.out.print((end.getTime() - start.getTime()) / (float)keys.size()); System.out.println(" average milliseconds per lookup"); TermEnum e = reader.terms(new Term("word", "azz")); System.out.println("Word after azz is " + e.term().text); reader.close(); store.close(); }
private final void add(String name, boolean isIndexed) { FieldInfo fi = fieldInfo(name); if (fi == null) addInternal(name, isIndexed); else if (fi.isIndexed != isIndexed) throw new IllegalStateException("field " + name + (fi.isIndexed ? " must" : " cannot") + " be an indexed field."); }
private Hashtable byName = new Hashtable(); FieldInfos() { add("", false); } FieldInfos(Directory d, String name) throws IOException { InputStream input = d.openFile(name); try { read(input); } finally { input.close(); } } /** Adds field info for a Document. */ final void add(Document doc) { Enumeration fields = doc.fields(); while (fields.hasMoreElements()) { Field field = (Field)fields.nextElement(); add(field.name(), field.isIndexed()); } } /** Merges in information from another FieldInfos. */ final void add(FieldInfos other) { for (int i = 0; i < other.size(); i++) { FieldInfo fi = other.fieldInfo(i); add(fi.name, fi.isIndexed); } } final void add(String name, boolean isIndexed) { FieldInfo fi = fieldInfo(name); if (fi == null) addInternal(name, isIndexed); else if (fi.isIndexed != isIndexed) throw new IllegalStateException("field " + name + (fi.isIndexed ? " must" : " cannot") + " be an indexed field."); }
public void setParams(String sortField) { super.setParams(sortField); String[] fields = sortField.split(","); SortField[] sortFields = new SortField[fields.length]; for (int i = 0; i < fields.length; i++) { String field = fields[i]; int index = field.lastIndexOf(":"); String fieldName; String typeString; if (index != -1) { fieldName = field.substring(0, index); typeString = field.substring(index, field.length()); } else { typeString = "auto"; fieldName = field; } int type = getType(typeString); sortFields[i] = new SortField(fieldName, type); } this.sort = new Sort(sortFields); }
public void setParams(String sortField) { super.setParams(sortField); String[] fields = sortField.split(","); SortField[] sortFields = new SortField[fields.length]; for (int i = 0; i < fields.length; i++) { String field = fields[i]; int index = field.lastIndexOf(":"); String fieldName; String typeString; if (index != -1) { fieldName = field.substring(0, index); typeString = field.substring(1+index, field.length()); } else { typeString = "auto"; fieldName = field; } int type = getType(typeString); sortFields[i] = new SortField(fieldName, type); } this.sort = new Sort(sortFields); }
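The only difference between these two setParams bodies is field.substring(index, ...) versus field.substring(1 + index, ...); without the +1 the type token keeps the leading ':' separator and can never match a known type name. A standalone illustration of that off-by-one, using a made-up "name:type" sort spec:

public class SortSpecParseDemo {
    public static void main(String[] args) {
        String field = "price:int"; // hypothetical "name:type" sort spec

        int index = field.lastIndexOf(":");
        String fieldName = field.substring(0, index);

        String buggyType = field.substring(index);      // ":int" -- keeps the separator
        String fixedType = field.substring(index + 1);  // "int"

        System.out.println(fieldName + " / " + buggyType + " / " + fixedType);
    }
}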
private boolean deleteOnCleanup; SSTableDeletingReference(SSTableTracker tracker, SSTableReader referent, ReferenceQueue<? super SSTableReader> q) { super(referent, q); this.tracker = tracker; this.path = referent.path; this.size = referent.bytesOnDisk(); }
private boolean deleteOnCleanup; SSTableDeletingReference(SSTableTracker tracker, SSTableReader referent, ReferenceQueue<? super SSTableReader> q) { super(referent, q); this.tracker = tracker; this.path = referent.getFilename(); this.size = referent.bytesOnDisk(); }
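Both versions of this constructor copy the filename and on-disk size out of the referent while it is still strongly reachable, because once the reference is enqueued the SSTableReader itself can no longer be dereferenced; the fix merely switches from the path field to the getFilename() accessor. The self-contained sketch below shows that copy-at-construction pattern with java.lang.ref; the Resource type and its field are invented for illustration.

import java.lang.ref.PhantomReference;
import java.lang.ref.ReferenceQueue;

public class DeletingReferenceDemo {

    // Stand-in for something like an SSTableReader.
    static class Resource {
        private final String filename;
        Resource(String filename) { this.filename = filename; }
        String getFilename() { return filename; }
    }

    // Copies everything it will need for cleanup at construction time,
    // because a phantom reference never hands back its referent.
    static class DeletingReference extends PhantomReference<Resource> {
        final String path;
        DeletingReference(Resource referent, ReferenceQueue<? super Resource> q) {
            super(referent, q);
            this.path = referent.getFilename();
        }
    }

    public static void main(String[] args) throws InterruptedException {
        ReferenceQueue<Resource> queue = new ReferenceQueue<>();
        DeletingReference ref = new DeletingReference(new Resource("data-1.db"), queue);

        System.gc(); // only a hint: enqueueing is not guaranteed to happen promptly
        Object enqueued = queue.remove(1000); // null if nothing was enqueued in time
        if (enqueued == ref) {
            System.out.println("would delete " + ref.path);
        } else {
            System.out.println("referent not collected yet; path was still captured: " + ref.path);
        }
    }
}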
public final void apply() throws IOException, ConfigurationException { // ensure migration is serial. don't apply unless the previous version matches. if (!DatabaseDescriptor.getDefsVersion().equals(lastVersion)) throw new ConfigurationException("Previous version mismatch. cannot apply."); // write to schema assert rm != null; if (!clientMode) rm.apply(); beforeApplyModels(); // write migration. if (!clientMode) { long now = System.currentTimeMillis(); ByteBuffer buf = serialize(); RowMutation migration = new RowMutation(Table.SYSTEM_TABLE, MIGRATIONS_KEY); migration.add(new QueryPath(MIGRATIONS_CF, null, ByteBuffer.wrap(UUIDGen.decompose(newVersion))), buf, now); migration.apply(); // note that we're storing this in the system table, which is not replicated logger.debug("Applying migration " + newVersion.toString()); migration = new RowMutation(Table.SYSTEM_TABLE, LAST_MIGRATION_KEY); migration.add(new QueryPath(SCHEMA_CF, null, LAST_MIGRATION_KEY), ByteBuffer.wrap(UUIDGen.decompose(newVersion)), now); migration.apply(); // if we fail here, there will be schema changes in the CL that will get replayed *AFTER* the schema is loaded. // CassandraDaemon checks for this condition (the stored version will be greater than the loaded version) // and calls MigrationManager.applyMigrations(loaded version, stored version). // flush changes out of memtables so we don't need to rely on the commit log. ColumnFamilyStore[] schemaStores = new ColumnFamilyStore[] { Table.open(Table.SYSTEM_TABLE).getColumnFamilyStore(Migration.MIGRATIONS_CF), Table.open(Table.SYSTEM_TABLE).getColumnFamilyStore(Migration.SCHEMA_CF) }; List<Future> flushes = new ArrayList<Future>(); for (ColumnFamilyStore cfs : schemaStores) flushes.add(cfs.forceFlush()); for (Future f : flushes) { if (f == null) // applying the migration triggered a flush independently continue; try { f.get(); } catch (ExecutionException e) { throw new IOException(e); } catch (InterruptedException e) { throw new IOException(e); } } } applyModels(); }
public final void apply() throws IOException, ConfigurationException { // ensure migration is serial. don't apply unless the previous version matches. if (!DatabaseDescriptor.getDefsVersion().equals(lastVersion)) throw new ConfigurationException("Previous version mismatch. cannot apply."); // write to schema assert rm != null; if (!clientMode) rm.apply(); beforeApplyModels(); // write migration. if (!clientMode) { long now = System.currentTimeMillis(); ByteBuffer buf = serialize(); RowMutation migration = new RowMutation(Table.SYSTEM_TABLE, MIGRATIONS_KEY); migration.add(new QueryPath(MIGRATIONS_CF, null, ByteBuffer.wrap(UUIDGen.decompose(newVersion))), buf, now); migration.apply(); // note that we're storing this in the system table, which is not replicated logger.info("Applying migration {} {}", newVersion.toString(), toString()); migration = new RowMutation(Table.SYSTEM_TABLE, LAST_MIGRATION_KEY); migration.add(new QueryPath(SCHEMA_CF, null, LAST_MIGRATION_KEY), ByteBuffer.wrap(UUIDGen.decompose(newVersion)), now); migration.apply(); // if we fail here, there will be schema changes in the CL that will get replayed *AFTER* the schema is loaded. // CassandraDaemon checks for this condition (the stored version will be greater than the loaded version) // and calls MigrationManager.applyMigrations(loaded version, stored version). // flush changes out of memtables so we don't need to rely on the commit log. ColumnFamilyStore[] schemaStores = new ColumnFamilyStore[] { Table.open(Table.SYSTEM_TABLE).getColumnFamilyStore(Migration.MIGRATIONS_CF), Table.open(Table.SYSTEM_TABLE).getColumnFamilyStore(Migration.SCHEMA_CF) }; List<Future> flushes = new ArrayList<Future>(); for (ColumnFamilyStore cfs : schemaStores) flushes.add(cfs.forceFlush()); for (Future f : flushes) { if (f == null) // applying the migration triggered a flush independently continue; try { f.get(); } catch (ExecutionException e) { throw new IOException(e); } catch (InterruptedException e) { throw new IOException(e); } } } applyModels(); }
public CoreDescriptor(CoreContainer container, String name, String instanceDir, Properties coreProps, SolrParams params) { this.coreContainer = container; originalCoreProperties.setProperty(CORE_NAME, name); originalCoreProperties.setProperty(CORE_INSTDIR, instanceDir); Properties containerProperties = container.getContainerProperties(); name = PropertiesUtil.substituteProperty(checkPropertyIsNotEmpty(name, CORE_NAME), containerProperties); instanceDir = PropertiesUtil.substituteProperty(checkPropertyIsNotEmpty(instanceDir, CORE_INSTDIR), containerProperties); coreProperties.putAll(defaultProperties); coreProperties.put(CORE_NAME, name); coreProperties.put(CORE_INSTDIR, instanceDir); coreProperties.put(CORE_ABS_INSTDIR, convertToAbsolute(instanceDir, container.getSolrHome())); for (String propname : coreProps.stringPropertyNames()) { String propvalue = coreProps.getProperty(propname); if (isUserDefinedProperty(propname)) originalExtraProperties.put(propname, propvalue); else originalCoreProperties.put(propname, propvalue); if (!requiredProperties.contains(propname)) // Required props are already dealt with coreProperties.setProperty(propname, PropertiesUtil.substituteProperty(propvalue, containerProperties)); } loadExtraProperties(); buildSubstitutableProperties(); // TODO maybe make this a CloudCoreDescriptor subclass? if (container.isZooKeeperAware()) { cloudDesc = new CloudDescriptor(name, coreProperties, this); if (params != null) { cloudDesc.setParams(params); } } else { cloudDesc = null; } }
public CoreDescriptor(CoreContainer container, String name, String instanceDir, Properties coreProps, SolrParams params) { this.coreContainer = container; originalCoreProperties.setProperty(CORE_NAME, name); originalCoreProperties.setProperty(CORE_INSTDIR, instanceDir); Properties containerProperties = container.getContainerProperties(); name = PropertiesUtil.substituteProperty(checkPropertyIsNotEmpty(name, CORE_NAME), containerProperties); instanceDir = PropertiesUtil.substituteProperty(checkPropertyIsNotEmpty(instanceDir, CORE_INSTDIR), containerProperties); coreProperties.putAll(defaultProperties); coreProperties.put(CORE_NAME, name); coreProperties.put(CORE_INSTDIR, instanceDir); coreProperties.put(CORE_ABS_INSTDIR, convertToAbsolute(instanceDir, container.getCoreRootDirectory())); for (String propname : coreProps.stringPropertyNames()) { String propvalue = coreProps.getProperty(propname); if (isUserDefinedProperty(propname)) originalExtraProperties.put(propname, propvalue); else originalCoreProperties.put(propname, propvalue); if (!requiredProperties.contains(propname)) // Required props are already dealt with coreProperties.setProperty(propname, PropertiesUtil.substituteProperty(propvalue, containerProperties)); } loadExtraProperties(); buildSubstitutableProperties(); // TODO maybe make this a CloudCoreDescriptor subclass? if (container.isZooKeeperAware()) { cloudDesc = new CloudDescriptor(name, coreProperties, this); if (params != null) { cloudDesc.setParams(params); } } else { cloudDesc = null; } }
public void testMaxDocs() throws Exception { DirectUpdateHandler2 updater = (DirectUpdateHandler2)SolrCore.getSolrCore().getUpdateHandler(); DirectUpdateHandler2.CommitTracker tracker = updater.tracker; tracker.timeUpperBound = -1; tracker.docsUpperBound = 14; XmlUpdateRequestHandler handler = new XmlUpdateRequestHandler(); handler.init( null ); SolrCore core = SolrCore.getSolrCore(); MapSolrParams params = new MapSolrParams( new HashMap<String, String>() ); // Add a single document SolrQueryResponse rsp = new SolrQueryResponse(); SolrQueryRequestBase req = new SolrQueryRequestBase( core, params ) {}; for( int i=0; i<14; i++ ) { req.setContentStreams( toContentStreams( adoc("id", "A"+i, "subject", "info" ), null ) ); handler.handleRequest( req, rsp ); } // It should not be there right away assertQ("shouldn't find any", req("id:A1") ,"//result[@numFound=0]" ); assertEquals( 0, tracker.autoCommitCount ); req.setContentStreams( toContentStreams( adoc("id", "A14", "subject", "info" ), null ) ); handler.handleRequest( req, rsp ); // Wait longer then the autocommit time Thread.sleep( 500 ); // blocks until commit is complete req.setContentStreams( toContentStreams( adoc("id", "A15", "subject", "info" ), null ) ); handler.handleRequest( req, rsp ); // Now make sure we can find it assertQ("should find one", req("id:A14") ,"//result[@numFound=1]" ); assertEquals( 1, tracker.autoCommitCount ); // But not the one added afterward assertQ("should find one", req("id:A15") ,"//result[@numFound=0]" ); assertEquals( 1, tracker.autoCommitCount ); // Now add some more for( int i=0; i<14; i++ ) { req.setContentStreams( toContentStreams( adoc("id", "B"+i, "subject", "info" ), null ) ); handler.handleRequest( req, rsp ); } // It should not be there right away assertQ("shouldn't find any", req("id:B1") ,"//result[@numFound=0]" ); assertEquals( 1, tracker.autoCommitCount ); req.setContentStreams( toContentStreams( adoc("id", "B14", "subject", "info" ), null ) ); handler.handleRequest( req, rsp ); Thread.sleep( 500 ); // add request will block if commit has already started or completed req.setContentStreams( toContentStreams( adoc("id", "B15", "subject", "info" ), null ) ); handler.handleRequest( req, rsp ); assertQ("should find one", req("id:B14") ,"//result[@numFound=1]" ); assertEquals( 2, tracker.autoCommitCount ); assertQ("should find none", req("id:B15") ,"//result[@numFound=0]" ); assertEquals( 2, tracker.autoCommitCount ); }
public void testMaxDocs() throws Exception { DirectUpdateHandler2 updater = (DirectUpdateHandler2)SolrCore.getSolrCore().getUpdateHandler(); DirectUpdateHandler2.CommitTracker tracker = updater.tracker; tracker.timeUpperBound = 100000; tracker.docsUpperBound = 14; XmlUpdateRequestHandler handler = new XmlUpdateRequestHandler(); handler.init( null ); SolrCore core = SolrCore.getSolrCore(); MapSolrParams params = new MapSolrParams( new HashMap<String, String>() ); // Add a single document SolrQueryResponse rsp = new SolrQueryResponse(); SolrQueryRequestBase req = new SolrQueryRequestBase( core, params ) {}; for( int i=0; i<14; i++ ) { req.setContentStreams( toContentStreams( adoc("id", "A"+i, "subject", "info" ), null ) ); handler.handleRequest( req, rsp ); } // It should not be there right away assertQ("shouldn't find any", req("id:A1") ,"//result[@numFound=0]" ); assertEquals( 0, tracker.autoCommitCount ); req.setContentStreams( toContentStreams( adoc("id", "A14", "subject", "info" ), null ) ); handler.handleRequest( req, rsp ); // Wait longer then the autocommit time Thread.sleep( 500 ); // blocks until commit is complete req.setContentStreams( toContentStreams( adoc("id", "A15", "subject", "info" ), null ) ); handler.handleRequest( req, rsp ); // Now make sure we can find it assertQ("should find one", req("id:A14") ,"//result[@numFound=1]" ); assertEquals( 1, tracker.autoCommitCount ); // But not the one added afterward assertQ("should find one", req("id:A15") ,"//result[@numFound=0]" ); assertEquals( 1, tracker.autoCommitCount ); // Now add some more for( int i=0; i<14; i++ ) { req.setContentStreams( toContentStreams( adoc("id", "B"+i, "subject", "info" ), null ) ); handler.handleRequest( req, rsp ); } // It should not be there right away assertQ("shouldn't find any", req("id:B1") ,"//result[@numFound=0]" ); assertEquals( 1, tracker.autoCommitCount ); req.setContentStreams( toContentStreams( adoc("id", "B14", "subject", "info" ), null ) ); handler.handleRequest( req, rsp ); Thread.sleep( 500 ); // add request will block if commit has already started or completed req.setContentStreams( toContentStreams( adoc("id", "B15", "subject", "info" ), null ) ); handler.handleRequest( req, rsp ); assertQ("should find one", req("id:B14") ,"//result[@numFound=1]" ); assertEquals( 2, tracker.autoCommitCount ); assertQ("should find none", req("id:B15") ,"//result[@numFound=0]" ); assertEquals( 2, tracker.autoCommitCount ); }
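The only change in this test pair is the tracker configuration: timeUpperBound goes from -1 to a very large 100000 ms, so that only docsUpperBound = 14 can trigger the auto-commit being asserted. The toy counter below shows the threshold behaviour those assertions rely on; it is a simplified stand-in, not Solr's CommitTracker.

public class CommitTrackerDemo {
    private final int docsUpperBound;
    private int pendingDocs;
    int autoCommitCount;

    CommitTrackerDemo(int docsUpperBound) { this.docsUpperBound = docsUpperBound; }

    // Called for each added document; fires an auto-commit once the threshold is exceeded.
    void addedDocument() {
        if (++pendingDocs > docsUpperBound) {
            autoCommitCount++;
            pendingDocs = 0;
        }
    }

    public static void main(String[] args) {
        CommitTrackerDemo tracker = new CommitTrackerDemo(14);
        for (int i = 0; i < 16; i++) {
            tracker.addedDocument();
        }
        // 14 adds stay below the bound; the 15th crosses it, just as in the test.
        System.out.println("auto commits: " + tracker.autoCommitCount); // 1
    }
}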
private void parseFieldList(String[] fl, SolrQueryRequest req) { _wantsScore = false; _wantsAllFields = false; if (fl == null || fl.length == 0 || fl.length == 1 && fl[0].length()==0) { _wantsAllFields = true; return; } NamedList<String> rename = new NamedList<String>(); DocTransformers augmenters = new DocTransformers(); for (String fieldList : fl) { add(fieldList,rename,augmenters,req); } for( int i=0; i<rename.size(); i++ ) { String from = rename.getName(i); String to = rename.getVal(i); okFieldNames.add( to ); boolean copy = (reqFieldNames!=null && reqFieldNames.contains(from)); if(!copy) { // Check that subsequent copy/rename requests have the field they need to copy for(int j=i+1; j<rename.size(); j++) { if(from.equals(rename.getName(j))) { rename.setName(j, to); // copy from the current target if(reqFieldNames==null) { reqFieldNames = new HashSet<String>(); } reqFieldNames.add(to); // don't rename our current target } } } augmenters.addTransformer( new RenameFieldTransformer( from, to, copy ) ); } if( !_wantsAllFields ) { if( !globs.isEmpty() ) { // TODO??? need to fill up the fields with matching field names in the index // and add them to okFieldNames? // maybe just get all fields? // this would disable field selection optimization... i think thatis OK fields.clear(); // this will get all fields, and use wantsField to limit } okFieldNames.addAll( fields ); } if( augmenters.size() == 1 ) { transformer = augmenters.getTransformer(0); } else if( augmenters.size() > 1 ) { transformer = augmenters; } } // like getId, but also accepts dashes for legacy fields String getFieldName(QueryParsing.StrParser sp) throws ParseException { sp.eatws(); int id_start = sp.pos; char ch; if (sp.pos < sp.end && (ch = sp.val.charAt(sp.pos)) != '$' && Character.isJavaIdentifierStart(ch)) { sp.pos++; while (sp.pos < sp.end) { ch = sp.val.charAt(sp.pos); if (!Character.isJavaIdentifierPart(ch) && ch != '.' && ch != '-') { break; } sp.pos++; } return sp.val.substring(id_start, sp.pos); } return null; }
private void parseFieldList(String[] fl, SolrQueryRequest req) { _wantsScore = false; _wantsAllFields = false; if (fl == null || fl.length == 0 || fl.length == 1 && fl[0].length()==0) { _wantsAllFields = true; return; } NamedList<String> rename = new NamedList<String>(); DocTransformers augmenters = new DocTransformers(); for (String fieldList : fl) { add(fieldList,rename,augmenters,req); } for( int i=0; i<rename.size(); i++ ) { String from = rename.getName(i); String to = rename.getVal(i); okFieldNames.add( to ); boolean copy = (reqFieldNames!=null && reqFieldNames.contains(from)); if(!copy) { // Check that subsequent copy/rename requests have the field they need to copy for(int j=i+1; j<rename.size(); j++) { if(from.equals(rename.getName(j))) { rename.setName(j, to); // copy from the current target if(reqFieldNames==null) { reqFieldNames = new HashSet<String>(); } reqFieldNames.add(to); // don't rename our current target } } } augmenters.addTransformer( new RenameFieldTransformer( from, to, copy ) ); } if( !_wantsAllFields ) { if( !globs.isEmpty() ) { // TODO??? need to fill up the fields with matching field names in the index // and add them to okFieldNames? // maybe just get all fields? // this would disable field selection optimization... i think thatis OK fields.clear(); // this will get all fields, and use wantsField to limit } okFieldNames.addAll( fields ); } if( augmenters.size() == 1 ) { transformer = augmenters.getTransformer(0); } else if( augmenters.size() > 1 ) { transformer = augmenters; } } // like getId, but also accepts dashes for legacy fields String getFieldName(QueryParsing.StrParser sp) { sp.eatws(); int id_start = sp.pos; char ch; if (sp.pos < sp.end && (ch = sp.val.charAt(sp.pos)) != '$' && Character.isJavaIdentifierStart(ch)) { sp.pos++; while (sp.pos < sp.end) { ch = sp.val.charAt(sp.pos); if (!Character.isJavaIdentifierPart(ch) && ch != '.' && ch != '-') { break; } sp.pos++; } return sp.val.substring(id_start, sp.pos); } return null; }
final InputSource src = SystemIdResolver.this.resolveEntity(null, publicId, baseURI, systemId); return (src == null) ? null : src.getByteStream(); } catch (IOException ioe) { throw new XMLStreamException("Cannot resolve entity", ioe); } } }; } URI resolveRelativeURI(String baseURI, String systemId) throws IOException,URISyntaxException { URI uri; // special case for backwards compatibility: if relative systemId starts with "/" (we convert that to an absolute solrres:-URI) if (systemId.startsWith("/")) { uri = new URI(RESOURCE_LOADER_URI_SCHEME, RESOURCE_LOADER_AUTHORITY_ABSOLUTE, "/", null, null).resolve(systemId); } else { // simply parse as URI uri = new URI(systemId); } // do relative resolving if (baseURI != null ) { uri = new URI(baseURI).resolve(uri); } return uri; }
final InputSource src = SystemIdResolver.this.resolveEntity(null, publicId, baseURI, systemId); return (src == null) ? null : src.getByteStream(); } catch (IOException ioe) { throw new XMLStreamException("Cannot resolve entity", ioe); } } }; } URI resolveRelativeURI(String baseURI, String systemId) throws URISyntaxException { URI uri; // special case for backwards compatibility: if relative systemId starts with "/" (we convert that to an absolute solrres:-URI) if (systemId.startsWith("/")) { uri = new URI(RESOURCE_LOADER_URI_SCHEME, RESOURCE_LOADER_AUTHORITY_ABSOLUTE, "/", null, null).resolve(systemId); } else { // simply parse as URI uri = new URI(systemId); } // do relative resolving if (baseURI != null ) { uri = new URI(baseURI).resolve(uri); } return uri; }
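The fixed entry drops an IOException that resolveRelativeURI never actually throws; the resolution logic is untouched. That logic leans on java.net.URI's relative-reference resolution, sketched below with an http base URI standing in for the custom solrres: scheme the resolver uses:

import java.net.URI;
import java.net.URISyntaxException;

public class RelativeUriDemo {
    public static void main(String[] args) throws URISyntaxException {
        // Stand-in base; the resolver itself builds solrres: URIs.
        URI base = new URI("http://example.com/conf/solrconfig.xml");

        // A relative system id resolves against the base's directory.
        System.out.println(base.resolve("stopwords.txt"));
        // -> http://example.com/conf/stopwords.txt

        // A system id starting with "/" replaces the base path entirely,
        // which is why the resolver above special-cases such ids into absolute URIs first.
        System.out.println(base.resolve("/schema.xml"));
        // -> http://example.com/schema.xml
    }
}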
private void doDelete(DeleteUpdateCommand cmd, List<Node> nodes, ModifiableSolrParams params) throws IOException { flushAdds(1); DeleteUpdateCommand clonedCmd = clone(cmd); DeleteRequest deleteRequest = new DeleteRequest(); deleteRequest.cmd = clonedCmd; deleteRequest.params = params; for (Node node : nodes) { List<DeleteRequest> dlist = deletes.get(node); if (dlist == null) { dlist = new ArrayList<DeleteRequest>(2); deletes.put(node, dlist); } dlist.add(deleteRequest); } flushDeletes(maxBufferedDeletesPerServer); }
private void doDelete(DeleteUpdateCommand cmd, List<Node> nodes, ModifiableSolrParams params) { flushAdds(1); DeleteUpdateCommand clonedCmd = clone(cmd); DeleteRequest deleteRequest = new DeleteRequest(); deleteRequest.cmd = clonedCmd; deleteRequest.params = params; for (Node node : nodes) { List<DeleteRequest> dlist = deletes.get(node); if (dlist == null) { dlist = new ArrayList<DeleteRequest>(2); deletes.put(node, dlist); } dlist.add(deleteRequest); } flushDeletes(maxBufferedDeletesPerServer); }
public void reset(Reader input) throws IOException { try { sentenceTokenizer.reset(input); wordTokenFilter = (TokenStream) tokenFilterClass.getConstructor( TokenStream.class).newInstance(sentenceTokenizer); term = wordTokenFilter.addAttribute(CharTermAttribute.class); } catch (Exception e) { throw ExceptionUtils.wrapAsRuntimeException(e); } } } } }
public void reset(Reader input) { try { sentenceTokenizer.reset(input); wordTokenFilter = (TokenStream) tokenFilterClass.getConstructor( TokenStream.class).newInstance(sentenceTokenizer); term = wordTokenFilter.addAttribute(CharTermAttribute.class); } catch (Exception e) { throw ExceptionUtils.wrapAsRuntimeException(e); } } } } }
public int[] getArray() { return prefetchParentOrdinal; } /** * refreshPrefetch() refreshes the parent array. Initially, it fills the * array from the positions of an appropriate posting list. If called during * a refresh(), when the arrays already exist, only values for new documents * (those beyond the last one in the array) are read from the positions and * added to the arrays (that are appropriately enlarged). We assume (and * this is indeed a correct assumption in our case) that existing categories * are never modified or deleted. */ void refresh(IndexReader indexReader) throws IOException { // Note that it is not necessary for us to obtain the read lock. // The reason is that we are only called from refresh() (precluding // another concurrent writer) or from the constructor (when no method // could be running). // The write lock is also not held during the following code, meaning // that reads *can* happen while this code is running. The "volatile" // property of the prefetchParentOrdinal and prefetchDepth array // references ensure the correct visibility property of the assignment // but other than that, we do *not* guarantee that a reader will not // use an old version of one of these arrays (or both) while a refresh // is going on. But we find this acceptable - until a refresh has // finished, the reader should not expect to see new information // (and the old information is the same in the old and new versions). int first; int num = indexReader.maxDoc(); if (prefetchParentOrdinal==null) { prefetchParentOrdinal = new int[num]; // Starting Lucene 2.9, following the change LUCENE-1542, we can // no longer reliably read the parent "-1" (see comment in // LuceneTaxonomyWriter.SinglePositionTokenStream). We have no way // to fix this in indexing without breaking backward-compatibility // with existing indexes, so what we'll do instead is just // hard-code the parent of ordinal 0 to be -1, and assume (as is // indeed the case) that no other parent can be -1. if (num>0) { prefetchParentOrdinal[0] = TaxonomyReader.INVALID_ORDINAL; } first = 1; } else { first = prefetchParentOrdinal.length; if (first==num) { return; // nothing to do - no category was added } // In Java 6, we could just do Arrays.copyOf()... int[] newarray = new int[num]; System.arraycopy(prefetchParentOrdinal, 0, newarray, 0, prefetchParentOrdinal.length); prefetchParentOrdinal = newarray; } // Read the new part of the parents array from the positions: // TODO (Facet): avoid Multi*? Bits liveDocs = MultiFields.getLiveDocs(indexReader); DocsAndPositionsEnum positions = MultiFields.getTermPositionsEnum(indexReader, liveDocs, Consts.FIELD_PAYLOADS, new BytesRef(Consts.PAYLOAD_PARENT), false); if ((positions == null || positions.advance(first) == DocIdSetIterator.NO_MORE_DOCS) && first < num) { throw new CorruptIndexException("Missing parent data for category " + first); } for (int i=first; i<num; i++) { // Note that we know positions.doc() >= i (this is an // invariant kept throughout this loop) if (positions.docID()==i) { if (positions.freq() == 0) { // shouldn't happen throw new CorruptIndexException( "Missing parent data for category "+i); } // TODO (Facet): keep a local (non-volatile) copy of the prefetchParentOrdinal // reference, because access to volatile reference is slower (?). 
// Note: The positions we get here are one less than the position // increment we added originally, so we get here the right numbers: prefetchParentOrdinal[i] = positions.nextPosition(); if (positions.nextDoc() == DocIdSetIterator.NO_MORE_DOCS) { if ( i+1 < num ) { throw new CorruptIndexException( "Missing parent data for category "+(i+1)); } break; } } else { // this shouldn't happen throw new CorruptIndexException( "Missing parent data for category "+i); } } } /** * add() is used in LuceneTaxonomyWriter, not in LuceneTaxonomyReader. * It is only called from a synchronized method, so it is not reentrant, * and also doesn't need to worry about reads happening at the same time. * * NOTE: add() and refresh() CANNOT be used together. If you call add(), * this changes the arrays and refresh() can no longer be used. */ void add(int ordinal, int parentOrdinal) throws IOException { if (ordinal >= prefetchParentOrdinal.length) { // grow the array, if necessary. // In Java 6, we could just do Arrays.copyOf()... int[] newarray = new int[ordinal*2+1]; System.arraycopy(prefetchParentOrdinal, 0, newarray, 0, prefetchParentOrdinal.length); prefetchParentOrdinal = newarray; } prefetchParentOrdinal[ordinal] = parentOrdinal; }
public int[] getArray() { return prefetchParentOrdinal; } /** * refreshPrefetch() refreshes the parent array. Initially, it fills the * array from the positions of an appropriate posting list. If called during * a refresh(), when the arrays already exist, only values for new documents * (those beyond the last one in the array) are read from the positions and * added to the arrays (that are appropriately enlarged). We assume (and * this is indeed a correct assumption in our case) that existing categories * are never modified or deleted. */ void refresh(IndexReader indexReader) throws IOException { // Note that it is not necessary for us to obtain the read lock. // The reason is that we are only called from refresh() (precluding // another concurrent writer) or from the constructor (when no method // could be running). // The write lock is also not held during the following code, meaning // that reads *can* happen while this code is running. The "volatile" // property of the prefetchParentOrdinal and prefetchDepth array // references ensure the correct visibility property of the assignment // but other than that, we do *not* guarantee that a reader will not // use an old version of one of these arrays (or both) while a refresh // is going on. But we find this acceptable - until a refresh has // finished, the reader should not expect to see new information // (and the old information is the same in the old and new versions). int first; int num = indexReader.maxDoc(); if (prefetchParentOrdinal==null) { prefetchParentOrdinal = new int[num]; // Starting Lucene 2.9, following the change LUCENE-1542, we can // no longer reliably read the parent "-1" (see comment in // LuceneTaxonomyWriter.SinglePositionTokenStream). We have no way // to fix this in indexing without breaking backward-compatibility // with existing indexes, so what we'll do instead is just // hard-code the parent of ordinal 0 to be -1, and assume (as is // indeed the case) that no other parent can be -1. if (num>0) { prefetchParentOrdinal[0] = TaxonomyReader.INVALID_ORDINAL; } first = 1; } else { first = prefetchParentOrdinal.length; if (first==num) { return; // nothing to do - no category was added } // In Java 6, we could just do Arrays.copyOf()... int[] newarray = new int[num]; System.arraycopy(prefetchParentOrdinal, 0, newarray, 0, prefetchParentOrdinal.length); prefetchParentOrdinal = newarray; } // Read the new part of the parents array from the positions: // TODO (Facet): avoid Multi*? Bits liveDocs = MultiFields.getLiveDocs(indexReader); DocsAndPositionsEnum positions = MultiFields.getTermPositionsEnum(indexReader, liveDocs, Consts.FIELD_PAYLOADS, new BytesRef(Consts.PAYLOAD_PARENT), false); if ((positions == null || positions.advance(first) == DocIdSetIterator.NO_MORE_DOCS) && first < num) { throw new CorruptIndexException("Missing parent data for category " + first); } for (int i=first; i<num; i++) { // Note that we know positions.doc() >= i (this is an // invariant kept throughout this loop) if (positions.docID()==i) { if (positions.freq() == 0) { // shouldn't happen throw new CorruptIndexException( "Missing parent data for category "+i); } // TODO (Facet): keep a local (non-volatile) copy of the prefetchParentOrdinal // reference, because access to volatile reference is slower (?). 
// Note: The positions we get here are one less than the position // increment we added originally, so we get here the right numbers: prefetchParentOrdinal[i] = positions.nextPosition(); if (positions.nextDoc() == DocIdSetIterator.NO_MORE_DOCS) { if ( i+1 < num ) { throw new CorruptIndexException( "Missing parent data for category "+(i+1)); } break; } } else { // this shouldn't happen throw new CorruptIndexException( "Missing parent data for category "+i); } } } /** * add() is used in LuceneTaxonomyWriter, not in LuceneTaxonomyReader. * It is only called from a synchronized method, so it is not reentrant, * and also doesn't need to worry about reads happening at the same time. * * NOTE: add() and refresh() CANNOT be used together. If you call add(), * this changes the arrays and refresh() can no longer be used. */ void add(int ordinal, int parentOrdinal) { if (ordinal >= prefetchParentOrdinal.length) { // grow the array, if necessary. // In Java 6, we could just do Arrays.copyOf()... int[] newarray = new int[ordinal*2+1]; System.arraycopy(prefetchParentOrdinal, 0, newarray, 0, prefetchParentOrdinal.length); prefetchParentOrdinal = newarray; } prefetchParentOrdinal[ordinal] = parentOrdinal; }
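Apart from removing an IOException that add() never threw, this fixed entry is identical to the buggy one. The growth step inside add() (allocate ordinal*2+1, copy with System.arraycopy) is the part worth seeing in isolation; the class and method names in the sketch below are invented.

import java.util.Arrays;

public class GrowableIntArrayDemo {
    private int[] values = new int[1];

    // Ensures the index is addressable, roughly doubling the backing array when needed.
    void set(int index, int value) {
        if (index >= values.length) {
            int[] newArray = new int[index * 2 + 1];
            System.arraycopy(values, 0, newArray, 0, values.length);
            values = newArray;
        }
        values[index] = value;
    }

    public static void main(String[] args) {
        GrowableIntArrayDemo a = new GrowableIntArrayDemo();
        for (int i = 0; i < 10; i++) {
            a.set(i, i * i);
        }
        System.out.println(Arrays.toString(Arrays.copyOf(a.values, 10)));
    }
}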
public static void main(String[] args) throws IOException, ClassNotFoundException { DictionaryFormat format; if (args[0].equalsIgnoreCase("ipadic")) { format = DictionaryFormat.IPADIC; } else if (args[0].equalsIgnoreCase("unidic")) { format = DictionaryFormat.UNIDIC; } else { System.err.println("Illegal format " + args[0] + " using unidic instead"); format = DictionaryFormat.IPADIC; } String inputDirname = args[1]; String outputDirname = args[2]; String inputEncoding = args[3]; boolean normalizeEntries = Boolean.parseBoolean(args[4]); System.out.println("dictionary builder"); System.out.println(""); System.out.println("dictionary format: " + format); System.out.println("input directory: " + inputDirname); System.out.println("output directory: " + outputDirname); System.out.println("input encoding: " + inputEncoding); System.out.println("normalize entries: " + normalizeEntries); System.out.println(""); DictionaryBuilder.build(format, inputDirname, outputDirname, inputEncoding, normalizeEntries); } }
public static void main(String[] args) throws IOException { DictionaryFormat format; if (args[0].equalsIgnoreCase("ipadic")) { format = DictionaryFormat.IPADIC; } else if (args[0].equalsIgnoreCase("unidic")) { format = DictionaryFormat.UNIDIC; } else { System.err.println("Illegal format " + args[0] + " using unidic instead"); format = DictionaryFormat.IPADIC; } String inputDirname = args[1]; String outputDirname = args[2]; String inputEncoding = args[3]; boolean normalizeEntries = Boolean.parseBoolean(args[4]); System.out.println("dictionary builder"); System.out.println(""); System.out.println("dictionary format: " + format); System.out.println("input directory: " + inputDirname); System.out.println("output directory: " + outputDirname); System.out.println("input encoding: " + inputEncoding); System.out.println("normalize entries: " + normalizeEntries); System.out.println(""); DictionaryBuilder.build(format, inputDirname, outputDirname, inputEncoding, normalizeEntries); } }
public static void main(String args[]) throws Exception { outputHeader(); outputMacro("ALetterSupp", "[:WordBreak=ALetter:]"); outputMacro("FormatSupp", "[:WordBreak=Format:]"); outputMacro("ExtendSupp", "[:WordBreak=Extend:]"); outputMacro("NumericSupp", "[:WordBreak=Numeric:]"); outputMacro("KatakanaSupp", "[:WordBreak=Katakana:]"); outputMacro("MidLetterSupp", "[:WordBreak=MidLetter:]"); outputMacro("MidNumSupp", "[:WordBreak=MidNum:]"); outputMacro("MidNumLetSupp", "[:WordBreak=MidNumLet:]"); outputMacro("ExtendNumLetSupp", "[:WordBreak=ExtendNumLet:]"); outputMacro("ExtendNumLetSupp", "[:WordBreak=ExtendNumLet:]"); outputMacro("ComplexContextSupp", "[:LineBreak=Complex_Context:]"); outputMacro("HanSupp", "[:Script=Han:]"); outputMacro("HiraganaSupp", "[:Script=Hiragana:]"); } static void outputHeader() { System.out.print(APACHE_LICENSE); System.out.print("// Generated using ICU4J " + VersionInfo.ICU_VERSION.toString() + " on "); System.out.println(DATE_FORMAT.format(new Date())); System.out.println("// by " + GenerateJFlexSupplementaryMacros.class.getName()); System.out.print(NL + NL); } // we have to carefully output the possibilities as compact utf-16 // range expressions, or jflex will OOM! static void outputMacro(String name, String pattern) { UnicodeSet set = new UnicodeSet(pattern); set.removeAll(BMP); System.out.println(name + " = ("); // if the set is empty, we have to do this or jflex will barf if (set.isEmpty()) { System.out.println("\t []"); } HashMap<Character,UnicodeSet> utf16ByLead = new HashMap<Character,UnicodeSet>(); for (UnicodeSetIterator it = new UnicodeSetIterator(set); it.next();) { char utf16[] = Character.toChars(it.codepoint); UnicodeSet trails = utf16ByLead.get(utf16[0]); if (trails == null) { trails = new UnicodeSet(); utf16ByLead.put(utf16[0], trails); } trails.add(utf16[1]); } boolean isFirst = true; for (Character c : utf16ByLead.keySet()) { UnicodeSet trail = utf16ByLead.get(c); System.out.print( isFirst ? "\t " : "\t| "); isFirst = false; System.out.println("([\\u" + Integer.toHexString(c) + "]" + trail.getRegexEquivalent() + ")"); } System.out.println(")"); } }
public static void main(String args[]) { outputHeader(); outputMacro("ALetterSupp", "[:WordBreak=ALetter:]"); outputMacro("FormatSupp", "[:WordBreak=Format:]"); outputMacro("ExtendSupp", "[:WordBreak=Extend:]"); outputMacro("NumericSupp", "[:WordBreak=Numeric:]"); outputMacro("KatakanaSupp", "[:WordBreak=Katakana:]"); outputMacro("MidLetterSupp", "[:WordBreak=MidLetter:]"); outputMacro("MidNumSupp", "[:WordBreak=MidNum:]"); outputMacro("MidNumLetSupp", "[:WordBreak=MidNumLet:]"); outputMacro("ExtendNumLetSupp", "[:WordBreak=ExtendNumLet:]"); outputMacro("ExtendNumLetSupp", "[:WordBreak=ExtendNumLet:]"); outputMacro("ComplexContextSupp", "[:LineBreak=Complex_Context:]"); outputMacro("HanSupp", "[:Script=Han:]"); outputMacro("HiraganaSupp", "[:Script=Hiragana:]"); } static void outputHeader() { System.out.print(APACHE_LICENSE); System.out.print("// Generated using ICU4J " + VersionInfo.ICU_VERSION.toString() + " on "); System.out.println(DATE_FORMAT.format(new Date())); System.out.println("// by " + GenerateJFlexSupplementaryMacros.class.getName()); System.out.print(NL + NL); } // we have to carefully output the possibilities as compact utf-16 // range expressions, or jflex will OOM! static void outputMacro(String name, String pattern) { UnicodeSet set = new UnicodeSet(pattern); set.removeAll(BMP); System.out.println(name + " = ("); // if the set is empty, we have to do this or jflex will barf if (set.isEmpty()) { System.out.println("\t []"); } HashMap<Character,UnicodeSet> utf16ByLead = new HashMap<Character,UnicodeSet>(); for (UnicodeSetIterator it = new UnicodeSetIterator(set); it.next();) { char utf16[] = Character.toChars(it.codepoint); UnicodeSet trails = utf16ByLead.get(utf16[0]); if (trails == null) { trails = new UnicodeSet(); utf16ByLead.put(utf16[0], trails); } trails.add(utf16[1]); } boolean isFirst = true; for (Character c : utf16ByLead.keySet()) { UnicodeSet trail = utf16ByLead.get(c); System.out.print( isFirst ? "\t " : "\t| "); isFirst = false; System.out.println("([\\u" + Integer.toHexString(c) + "]" + trail.getRegexEquivalent() + ")"); } System.out.println(")"); } }
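Here too the only change is a dropped throws clause. The interesting machinery is how outputMacro buckets supplementary code points by their UTF-16 lead surrogate; the sketch below shows the Character.toChars split it depends on, using an arbitrarily chosen supplementary code point:

public class SurrogatePairDemo {
    public static void main(String[] args) {
        int codePoint = 0x20BB7; // a supplementary CJK code point, chosen arbitrarily

        // Supplementary code points encode as exactly two UTF-16 code units.
        char[] utf16 = Character.toChars(codePoint);
        System.out.printf("lead=U+%04X trail=U+%04X%n", (int) utf16[0], (int) utf16[1]);

        System.out.println(Character.isHighSurrogate(utf16[0])); // true
        System.out.println(Character.isLowSurrogate(utf16[1]));  // true
    }
}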
public static void main(String args[]) throws Exception { outputHeader(); outputMacro("ID_Start_Supp", "[:ID_Start:]"); outputMacro("ID_Continue_Supp", "[:ID_Continue:]"); } static void outputHeader() { System.out.print(APACHE_LICENSE); System.out.print("// Generated using ICU4J " + VersionInfo.ICU_VERSION.toString() + " on "); System.out.println(DATE_FORMAT.format(new Date())); System.out.println("// by " + GenerateHTMLStripCharFilterSupplementaryMacros.class.getName()); System.out.print(NL + NL); } // we have to carefully output the possibilities as compact utf-16 // range expressions, or jflex will OOM! static void outputMacro(String name, String pattern) { UnicodeSet set = new UnicodeSet(pattern); set.removeAll(BMP); System.out.println(name + " = ("); // if the set is empty, we have to do this or jflex will barf if (set.isEmpty()) { System.out.println("\t []"); } HashMap<Character,UnicodeSet> utf16ByLead = new HashMap<Character,UnicodeSet>(); for (UnicodeSetIterator it = new UnicodeSetIterator(set); it.next();) { char utf16[] = Character.toChars(it.codepoint); UnicodeSet trails = utf16ByLead.get(utf16[0]); if (trails == null) { trails = new UnicodeSet(); utf16ByLead.put(utf16[0], trails); } trails.add(utf16[1]); } Map<String,UnicodeSet> utf16ByTrail = new HashMap<String,UnicodeSet>(); for (Map.Entry<Character,UnicodeSet> entry : utf16ByLead.entrySet()) { String trail = entry.getValue().getRegexEquivalent(); UnicodeSet leads = utf16ByTrail.get(trail); if (leads == null) { leads = new UnicodeSet(); utf16ByTrail.put(trail, leads); } leads.add(entry.getKey()); } boolean isFirst = true; for (Map.Entry<String,UnicodeSet> entry : utf16ByTrail.entrySet()) { System.out.print( isFirst ? "\t " : "\t| "); isFirst = false; System.out.println(entry.getValue().getRegexEquivalent() + entry.getKey()); } System.out.println(")"); } }
public static void main(String args[]) { outputHeader(); outputMacro("ID_Start_Supp", "[:ID_Start:]"); outputMacro("ID_Continue_Supp", "[:ID_Continue:]"); } static void outputHeader() { System.out.print(APACHE_LICENSE); System.out.print("// Generated using ICU4J " + VersionInfo.ICU_VERSION.toString() + " on "); System.out.println(DATE_FORMAT.format(new Date())); System.out.println("// by " + GenerateHTMLStripCharFilterSupplementaryMacros.class.getName()); System.out.print(NL + NL); } // we have to carefully output the possibilities as compact utf-16 // range expressions, or jflex will OOM! static void outputMacro(String name, String pattern) { UnicodeSet set = new UnicodeSet(pattern); set.removeAll(BMP); System.out.println(name + " = ("); // if the set is empty, we have to do this or jflex will barf if (set.isEmpty()) { System.out.println("\t []"); } HashMap<Character,UnicodeSet> utf16ByLead = new HashMap<Character,UnicodeSet>(); for (UnicodeSetIterator it = new UnicodeSetIterator(set); it.next();) { char utf16[] = Character.toChars(it.codepoint); UnicodeSet trails = utf16ByLead.get(utf16[0]); if (trails == null) { trails = new UnicodeSet(); utf16ByLead.put(utf16[0], trails); } trails.add(utf16[1]); } Map<String,UnicodeSet> utf16ByTrail = new HashMap<String,UnicodeSet>(); for (Map.Entry<Character,UnicodeSet> entry : utf16ByLead.entrySet()) { String trail = entry.getValue().getRegexEquivalent(); UnicodeSet leads = utf16ByTrail.get(trail); if (leads == null) { leads = new UnicodeSet(); utf16ByTrail.put(trail, leads); } leads.add(entry.getKey()); } boolean isFirst = true; for (Map.Entry<String,UnicodeSet> entry : utf16ByTrail.entrySet()) { System.out.print( isFirst ? "\t " : "\t| "); isFirst = false; System.out.println(entry.getValue().getRegexEquivalent() + entry.getKey()); } System.out.println(")"); } }
public void end() throws IOException { // set final offset final int finalOffset = correctOffset(tokenEnd); offsetAtt.setOffset(finalOffset, finalOffset); } }
public void end() { // set final offset final int finalOffset = correctOffset(tokenEnd); offsetAtt.setOffset(finalOffset, finalOffset); } }
public void end() throws IOException { // set final offset final int finalOffset = correctOffset(scanner.yychar() + scanner.yylength()); this.offsetAtt.setOffset(finalOffset, finalOffset); } }
public void end() { // set final offset final int finalOffset = correctOffset(scanner.yychar() + scanner.yylength()); this.offsetAtt.setOffset(finalOffset, finalOffset); } }
private void addInternal(CharsRef synset[], int size) throws IOException { if (size <= 1) { return; // nothing to do } if (expand) { for (int i = 0; i < size; i++) { for (int j = 0; j < size; j++) { add(synset[i], synset[j], false); } } } else { for (int i = 0; i < size; i++) { add(synset[i], synset[0], false); } } } }
private void addInternal(CharsRef synset[], int size) { if (size <= 1) { return; // nothing to do } if (expand) { for (int i = 0; i < size; i++) { for (int j = 0; j < size; j++) { add(synset[i], synset[j], false); } } } else { for (int i = 0; i < size; i++) { add(synset[i], synset[0], false); } } } }
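The pair above differs only in the removed throws IOException. The expand/contract branching itself is compact enough to restate as a runnable sketch: with expand=true every synonym maps to every member of the set, otherwise everything maps to the first (canonical) entry. The sample words are made up.

public class SynonymExpandDemo {
    public static void main(String[] args) {
        String[] synset = {"couch", "sofa", "divan"}; // example synset, not from the entry
        boolean expand = false;

        // expand=true: every word maps to every word in the set;
        // expand=false: every word maps only to the first (canonical) word.
        for (int i = 0; i < synset.length; i++) {
            if (expand) {
                for (int j = 0; j < synset.length; j++) {
                    System.out.println(synset[i] + " -> " + synset[j]);
                }
            } else {
                System.out.println(synset[i] + " -> " + synset[0]);
            }
        }
    }
}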
public static void main(String[] args) throws Exception { TernaryTree tt = new TernaryTree(); tt.insert("Carlos", 'C'); tt.insert("Car", 'r'); tt.insert("palos", 'l'); tt.insert("pa", 'p'); tt.trimToSize(); System.out.println((char) tt.find("Car")); System.out.println((char) tt.find("Carlos")); System.out.println((char) tt.find("alto")); tt.printStats(); } }
public static void main(String[] args) { TernaryTree tt = new TernaryTree(); tt.insert("Carlos", 'C'); tt.insert("Car", 'r'); tt.insert("palos", 'l'); tt.insert("pa", 'p'); tt.trimToSize(); System.out.println((char) tt.find("Car")); System.out.println((char) tt.find("Carlos")); System.out.println((char) tt.find("alto")); tt.printStats(); } }
protected boolean accept() throws IOException { return useWhiteList == stopTypes.contains(typeAttribute.type()); } }
protected boolean accept() { return useWhiteList == stopTypes.contains(typeAttribute.type()); } }
private MultiPhraseQuery randomPhraseQuery(long seed) throws Exception { Random random = new Random(seed); int length = _TestUtil.nextInt(random, 2, 5); MultiPhraseQuery pq = new MultiPhraseQuery(); int position = 0; for (int i = 0; i < length; i++) { int depth = _TestUtil.nextInt(random, 1, 3); Term terms[] = new Term[depth]; for (int j = 0; j < depth; j++) { terms[j] = new Term("field", "" + (char) _TestUtil.nextInt(random, 'a', 'z')); } pq.add(terms, position); position += _TestUtil.nextInt(random, 1, 3); } return pq; } }
private MultiPhraseQuery randomPhraseQuery(long seed) { Random random = new Random(seed); int length = _TestUtil.nextInt(random, 2, 5); MultiPhraseQuery pq = new MultiPhraseQuery(); int position = 0; for (int i = 0; i < length; i++) { int depth = _TestUtil.nextInt(random, 1, 3); Term terms[] = new Term[depth]; for (int j = 0; j < depth; j++) { terms[j] = new Term("field", "" + (char) _TestUtil.nextInt(random, 'a', 'z')); } pq.add(terms, position); position += _TestUtil.nextInt(random, 1, 3); } return pq; } }
private void checkInvariants(IndexWriter writer) throws IOException { writer.waitForMerges(); int maxBufferedDocs = writer.getConfig().getMaxBufferedDocs(); int mergeFactor = ((LogMergePolicy) writer.getConfig().getMergePolicy()).getMergeFactor(); int maxMergeDocs = ((LogMergePolicy) writer.getConfig().getMergePolicy()).getMaxMergeDocs(); int ramSegmentCount = writer.getNumBufferedDocuments(); assertTrue(ramSegmentCount < maxBufferedDocs); int lowerBound = -1; int upperBound = maxBufferedDocs; int numSegments = 0; int segmentCount = writer.getSegmentCount(); for (int i = segmentCount - 1; i >= 0; i--) { int docCount = writer.getDocCount(i); assertTrue("docCount=" + docCount + " lowerBound=" + lowerBound + " upperBound=" + upperBound + " i=" + i + " segmentCount=" + segmentCount + " index=" + writer.segString() + " config=" + writer.getConfig(), docCount > lowerBound); if (docCount <= upperBound) { numSegments++; } else { if (upperBound * mergeFactor <= maxMergeDocs) { assertTrue("maxMergeDocs=" + maxMergeDocs + "; numSegments=" + numSegments + "; upperBound=" + upperBound + "; mergeFactor=" + mergeFactor + "; segs=" + writer.segString() + " config=" + writer.getConfig(), numSegments < mergeFactor); } do { lowerBound = upperBound; upperBound *= mergeFactor; } while (docCount > upperBound); numSegments = 1; } } if (upperBound * mergeFactor <= maxMergeDocs) { assertTrue(numSegments < mergeFactor); } } }
private void checkInvariants(IndexWriter writer) { writer.waitForMerges(); int maxBufferedDocs = writer.getConfig().getMaxBufferedDocs(); int mergeFactor = ((LogMergePolicy) writer.getConfig().getMergePolicy()).getMergeFactor(); int maxMergeDocs = ((LogMergePolicy) writer.getConfig().getMergePolicy()).getMaxMergeDocs(); int ramSegmentCount = writer.getNumBufferedDocuments(); assertTrue(ramSegmentCount < maxBufferedDocs); int lowerBound = -1; int upperBound = maxBufferedDocs; int numSegments = 0; int segmentCount = writer.getSegmentCount(); for (int i = segmentCount - 1; i >= 0; i--) { int docCount = writer.getDocCount(i); assertTrue("docCount=" + docCount + " lowerBound=" + lowerBound + " upperBound=" + upperBound + " i=" + i + " segmentCount=" + segmentCount + " index=" + writer.segString() + " config=" + writer.getConfig(), docCount > lowerBound); if (docCount <= upperBound) { numSegments++; } else { if (upperBound * mergeFactor <= maxMergeDocs) { assertTrue("maxMergeDocs=" + maxMergeDocs + "; numSegments=" + numSegments + "; upperBound=" + upperBound + "; mergeFactor=" + mergeFactor + "; segs=" + writer.segString() + " config=" + writer.getConfig(), numSegments < mergeFactor); } do { lowerBound = upperBound; upperBound *= mergeFactor; } while (docCount > upperBound); numSegments = 1; } } if (upperBound * mergeFactor <= maxMergeDocs) { assertTrue(numSegments < mergeFactor); } } }
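Again the only delta is the throws clause. The invariant walk itself relies on LogMergePolicy-style size levels: each level's upper bound is the previous bound times mergeFactor, starting from maxBufferedDocs. The standalone sketch below reuses that same bound-walking loop to show which level a given segment size lands in (example numbers only):

public class MergeLevelDemo {
    public static void main(String[] args) {
        int maxBufferedDocs = 10;
        int mergeFactor = 3;

        // A segment's "level" is the first bucket (maxBufferedDocs * mergeFactor^k)
        // whose upper bound is not exceeded -- the same bounds the test walks.
        for (int docCount : new int[] {7, 10, 25, 30, 100}) {
            int lowerBound = -1;
            int upperBound = maxBufferedDocs;
            int level = 0;
            while (docCount > upperBound) {
                lowerBound = upperBound;
                upperBound *= mergeFactor;
                level++;
            }
            System.out.println(docCount + " docs -> level " + level
                + " (" + (lowerBound + 1) + ".." + upperBound + ")");
        }
    }
}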
private String runAndReturnSyserr() throws Exception { JUnitCore.runClasses(Nested.class); String err = getSysErr(); // super.prevSysErr.println("Type: " + type + ", point: " + where + " resulted in:\n" + err); // super.prevSysErr.println("---"); return err; } }
private String runAndReturnSyserr() { JUnitCore.runClasses(Nested.class); String err = getSysErr(); // super.prevSysErr.println("Type: " + type + ", point: " + where + " resulted in:\n" + err); // super.prevSysErr.println("---"); return err; } }
public Writer(Directory dir, String id, Comparator<BytesRef> comp, Counter bytesUsed, IOContext context, float acceptableOverheadRatio) throws IOException { super(dir, id, CODEC_NAME_IDX, CODEC_NAME_DAT, VERSION_CURRENT, bytesUsed, context, acceptableOverheadRatio, Type.BYTES_FIXED_SORTED); this.comp = comp; }
public Writer(Directory dir, String id, Comparator<BytesRef> comp, Counter bytesUsed, IOContext context, float acceptableOverheadRatio) { super(dir, id, CODEC_NAME_IDX, CODEC_NAME_DAT, VERSION_CURRENT, bytesUsed, context, acceptableOverheadRatio, Type.BYTES_FIXED_SORTED); this.comp = comp; }
public Writer(Directory dir, String id, Comparator<BytesRef> comp, Counter bytesUsed, IOContext context, float acceptableOverheadRatio) throws IOException { super(dir, id, CODEC_NAME_IDX, CODEC_NAME_DAT, VERSION_CURRENT, bytesUsed, context, acceptableOverheadRatio, Type.BYTES_VAR_SORTED); this.comp = comp; size = 0; }
public Writer(Directory dir, String id, Comparator<BytesRef> comp, Counter bytesUsed, IOContext context, float acceptableOverheadRatio) { super(dir, id, CODEC_NAME_IDX, CODEC_NAME_DAT, VERSION_CURRENT, bytesUsed, context, acceptableOverheadRatio, Type.BYTES_VAR_SORTED); this.comp = comp; size = 0; }
public DisjunctionMaxScorer(Weight weight, float tieBreakerMultiplier, Scorer[] subScorers, int numScorers) throws IOException { super(weight); this.tieBreakerMultiplier = tieBreakerMultiplier; // The passed subScorers array includes only scorers which have documents // (DisjunctionMaxQuery takes care of that), and their nextDoc() was already // called. this.subScorers = subScorers; this.numScorers = numScorers; heapify(); }
public DisjunctionMaxScorer(Weight weight, float tieBreakerMultiplier, Scorer[] subScorers, int numScorers) { super(weight); this.tieBreakerMultiplier = tieBreakerMultiplier; // The passed subScorers array includes only scorers which have documents // (DisjunctionMaxQuery takes care of that), and their nextDoc() was already // called. this.subScorers = subScorers; this.numScorers = numScorers; heapify(); }
private final Similarity.ExactSimScorer docScorer; /** * Construct a <code>TermScorer</code>. * * @param weight * The weight of the <code>Term</code> in the query. * @param td * An iterator over the documents matching the <code>Term</code>. * @param docScorer * The <code>Similarity.ExactSimScorer</code> implementation * to be used for score computations. */ MatchOnlyTermScorer(Weight weight, DocsEnum td, Similarity.ExactSimScorer docScorer) throws IOException { super(weight); this.docScorer = docScorer; this.docsEnum = td; }
private final Similarity.ExactSimScorer docScorer; /** * Construct a <code>TermScorer</code>. * * @param weight * The weight of the <code>Term</code> in the query. * @param td * An iterator over the documents matching the <code>Term</code>. * @param docScorer * The <code>Similarity.ExactSimScorer</code> implementation * to be used for score computations. */ MatchOnlyTermScorer(Weight weight, DocsEnum td, Similarity.ExactSimScorer docScorer) { super(weight); this.docScorer = docScorer; this.docsEnum = td; }
private final Bits liveDocs; MatchAllScorer(IndexReader reader, Bits liveDocs, Weight w, float score) throws IOException { super(w); this.liveDocs = liveDocs; this.score = score; maxDoc = reader.maxDoc(); }
private final Bits liveDocs; MatchAllScorer(IndexReader reader, Bits liveDocs, Weight w, float score) { super(w); this.liveDocs = liveDocs; this.score = score; maxDoc = reader.maxDoc(); }
private final Similarity.ExactSimScorer docScorer; /** * Construct a <code>TermScorer</code>. * * @param weight * The weight of the <code>Term</code> in the query. * @param td * An iterator over the documents matching the <code>Term</code>. * @param docScorer * The <code>Similarity.ExactSimScorer</code> implementation * to be used for score computations. */ TermScorer(Weight weight, DocsEnum td, Similarity.ExactSimScorer docScorer) throws IOException { super(weight); this.docScorer = docScorer; this.docsEnum = td; }
private final Similarity.ExactSimScorer docScorer; /** * Construct a <code>TermScorer</code>. * * @param weight * The weight of the <code>Term</code> in the query. * @param td * An iterator over the documents matching the <code>Term</code>. * @param docScorer * The <code>Similarity.ExactSimScorer</code> implementation * to be used for score computations. */ TermScorer(Weight weight, DocsEnum td, Similarity.ExactSimScorer docScorer) { super(weight); this.docScorer = docScorer; this.docsEnum = td; }
public void addField(IndexableField field, FieldInfo fieldInfo) throws IOException { if (numStoredFields == storedFields.length) { int newSize = ArrayUtil.oversize(numStoredFields + 1, RamUsageEstimator.NUM_BYTES_OBJECT_REF); IndexableField[] newArray = new IndexableField[newSize]; System.arraycopy(storedFields, 0, newArray, 0, numStoredFields); storedFields = newArray; FieldInfo[] newInfoArray = new FieldInfo[newSize]; System.arraycopy(fieldInfos, 0, newInfoArray, 0, numStoredFields); fieldInfos = newInfoArray; } storedFields[numStoredFields] = field; fieldInfos[numStoredFields] = fieldInfo; numStoredFields++; assert docState.testPoint("StoredFieldsWriterPerThread.processFields.writeField"); } }
public void addField(IndexableField field, FieldInfo fieldInfo) { if (numStoredFields == storedFields.length) { int newSize = ArrayUtil.oversize(numStoredFields + 1, RamUsageEstimator.NUM_BYTES_OBJECT_REF); IndexableField[] newArray = new IndexableField[newSize]; System.arraycopy(storedFields, 0, newArray, 0, numStoredFields); storedFields = newArray; FieldInfo[] newInfoArray = new FieldInfo[newSize]; System.arraycopy(fieldInfos, 0, newInfoArray, 0, numStoredFields); fieldInfos = newInfoArray; } storedFields[numStoredFields] = field; fieldInfos[numStoredFields] = fieldInfo; numStoredFields++; assert docState.testPoint("StoredFieldsWriterPerThread.processFields.writeField"); } }
public TermsHashConsumerPerField addField(TermsHashPerField termsHashPerField, FieldInfo fieldInfo) { return new TermVectorsConsumerPerField(termsHashPerField, this, fieldInfo); } void addFieldToFlush(TermVectorsConsumerPerField fieldToFlush) { if (numVectorFields == perFields.length) { int newSize = ArrayUtil.oversize(numVectorFields + 1, RamUsageEstimator.NUM_BYTES_OBJECT_REF); TermVectorsConsumerPerField[] newArray = new TermVectorsConsumerPerField[newSize]; System.arraycopy(perFields, 0, newArray, 0, numVectorFields); perFields = newArray; } perFields[numVectorFields++] = fieldToFlush; } @Override void startDocument() throws IOException { assert clearLastVectorFieldName(); reset(); }
public TermsHashConsumerPerField addField(TermsHashPerField termsHashPerField, FieldInfo fieldInfo) { return new TermVectorsConsumerPerField(termsHashPerField, this, fieldInfo); } void addFieldToFlush(TermVectorsConsumerPerField fieldToFlush) { if (numVectorFields == perFields.length) { int newSize = ArrayUtil.oversize(numVectorFields + 1, RamUsageEstimator.NUM_BYTES_OBJECT_REF); TermVectorsConsumerPerField[] newArray = new TermVectorsConsumerPerField[newSize]; System.arraycopy(perFields, 0, newArray, 0, numVectorFields); perFields = newArray; } perFields[numVectorFields++] = fieldToFlush; } @Override void startDocument() { assert clearLastVectorFieldName(); reset(); }
public void abort() {} /** Called once per field per document if term vectors * are enabled, to write the vectors to * RAMOutputStream, which is then quickly flushed to * the real term vectors files in the Directory. */ @Override void finish() throws IOException { if (!doVectors || termsHashPerField.bytesHash.size() == 0) { return; } termsWriter.addFieldToFlush(this); }
public void abort() {} /** Called once per field per document if term vectors * are enabled, to write the vectors to * RAMOutputStream, which is then quickly flushed to * the real term vectors files in the Directory. */ @Override void finish() { if (!doVectors || termsHashPerField.bytesHash.size() == 0) { return; } termsWriter.addFieldToFlush(this); }
public MergeSpecification findForcedDeletesMerges(SegmentInfos segmentInfos) throws CorruptIndexException, IOException { final List<SegmentInfoPerCommit> segments = segmentInfos.asList(); final int numSegments = segments.size(); if (verbose()) { message("findForcedDeleteMerges: " + numSegments + " segments"); } MergeSpecification spec = new MergeSpecification(); int firstSegmentWithDeletions = -1; IndexWriter w = writer.get(); assert w != null; for(int i=0;i<numSegments;i++) { final SegmentInfoPerCommit info = segmentInfos.info(i); int delCount = w.numDeletedDocs(info); if (delCount > 0) { if (verbose()) { message(" segment " + info.info.name + " has deletions"); } if (firstSegmentWithDeletions == -1) firstSegmentWithDeletions = i; else if (i - firstSegmentWithDeletions == mergeFactor) { // We've seen mergeFactor segments in a row with // deletions, so force a merge now: if (verbose()) { message(" add merge " + firstSegmentWithDeletions + " to " + (i-1) + " inclusive"); } spec.add(new OneMerge(segments.subList(firstSegmentWithDeletions, i))); firstSegmentWithDeletions = i; } } else if (firstSegmentWithDeletions != -1) { // End of a sequence of segments with deletions, so, // merge those past segments even if it's fewer than // mergeFactor segments if (verbose()) { message(" add merge " + firstSegmentWithDeletions + " to " + (i-1) + " inclusive"); } spec.add(new OneMerge(segments.subList(firstSegmentWithDeletions, i))); firstSegmentWithDeletions = -1; } } if (firstSegmentWithDeletions != -1) { if (verbose()) { message(" add merge " + firstSegmentWithDeletions + " to " + (numSegments-1) + " inclusive"); } spec.add(new OneMerge(segments.subList(firstSegmentWithDeletions, numSegments))); } return spec; }
public MergeSpecification findForcedDeletesMerges(SegmentInfos segmentInfos) throws IOException { final List<SegmentInfoPerCommit> segments = segmentInfos.asList(); final int numSegments = segments.size(); if (verbose()) { message("findForcedDeleteMerges: " + numSegments + " segments"); } MergeSpecification spec = new MergeSpecification(); int firstSegmentWithDeletions = -1; IndexWriter w = writer.get(); assert w != null; for(int i=0;i<numSegments;i++) { final SegmentInfoPerCommit info = segmentInfos.info(i); int delCount = w.numDeletedDocs(info); if (delCount > 0) { if (verbose()) { message(" segment " + info.info.name + " has deletions"); } if (firstSegmentWithDeletions == -1) firstSegmentWithDeletions = i; else if (i - firstSegmentWithDeletions == mergeFactor) { // We've seen mergeFactor segments in a row with // deletions, so force a merge now: if (verbose()) { message(" add merge " + firstSegmentWithDeletions + " to " + (i-1) + " inclusive"); } spec.add(new OneMerge(segments.subList(firstSegmentWithDeletions, i))); firstSegmentWithDeletions = i; } } else if (firstSegmentWithDeletions != -1) { // End of a sequence of segments with deletions, so, // merge those past segments even if it's fewer than // mergeFactor segments if (verbose()) { message(" add merge " + firstSegmentWithDeletions + " to " + (i-1) + " inclusive"); } spec.add(new OneMerge(segments.subList(firstSegmentWithDeletions, i))); firstSegmentWithDeletions = -1; } } if (firstSegmentWithDeletions != -1) { if (verbose()) { message(" add merge " + firstSegmentWithDeletions + " to " + (numSegments-1) + " inclusive"); } spec.add(new OneMerge(segments.subList(firstSegmentWithDeletions, numSegments))); } return spec; }
public MultiDocsEnum(MultiTermsEnum parent, int subReaderCount) { this.parent = parent; subDocsEnum = new DocsEnum[subReaderCount]; } MultiDocsEnum reset(final EnumWithSlice[] subs, final int numSubs) throws IOException { this.numSubs = numSubs; this.subs = new EnumWithSlice[subs.length]; for(int i=0;i<subs.length;i++) { this.subs[i] = new EnumWithSlice(); this.subs[i].docsEnum = subs[i].docsEnum; this.subs[i].slice = subs[i].slice; } upto = -1; current = null; return this; }
public MultiDocsEnum(MultiTermsEnum parent, int subReaderCount) { this.parent = parent; subDocsEnum = new DocsEnum[subReaderCount]; } MultiDocsEnum reset(final EnumWithSlice[] subs, final int numSubs) { this.numSubs = numSubs; this.subs = new EnumWithSlice[subs.length]; for(int i=0;i<subs.length;i++) { this.subs[i] = new EnumWithSlice(); this.subs[i].docsEnum = subs[i].docsEnum; this.subs[i].slice = subs[i].slice; } upto = -1; current = null; return this; }
public void work(double units) throws MergePolicy.MergeAbortedException { // do nothing } }; } }
public void work(double units) { // do nothing } }; } }
public static void run(Configuration conf, Path input, Path output, int numDims, int clusters, DistanceMeasure measure, double convergenceDelta, int maxIterations) throws IOException, InterruptedException, ClassNotFoundException { // create a few new Paths for temp files and transformations Path outputCalc = new Path(output, "calculations"); Path outputTmp = new Path(output, "temporary"); // Take in the raw CSV text file and split it ourselves, // creating our own SequenceFiles for the matrices to read later // (similar to the style of syntheticcontrol.canopy.InputMapper) Path affSeqFiles = new Path(outputCalc, "seqfile-" + (System.nanoTime() & 0xFF)); AffinityMatrixInputJob.runJob(input, affSeqFiles, numDims, numDims); // Next step: construct the affinity matrix using the newly-created // sequence files DistributedRowMatrix A = new DistributedRowMatrix(affSeqFiles, new Path(outputTmp, "afftmp-" + (System.nanoTime() & 0xFF)), numDims, numDims); Configuration depConf = new Configuration(conf); A.setConf(depConf); // Next step: construct the diagonal matrix D (represented as a vector) // and calculate the normalized Laplacian of the form: // L = D^(-0.5)AD^(-0.5) Vector D = MatrixDiagonalizeJob.runJob(affSeqFiles, numDims); DistributedRowMatrix L = VectorMatrixMultiplicationJob.runJob(affSeqFiles, D, new Path(outputCalc, "laplacian-" + (System.nanoTime() & 0xFF)), new Path(outputCalc, "laplacian-tmp-" + (System.nanoTime() & 0xFF))); L.setConf(depConf); // Next step: perform eigen-decomposition using LanczosSolver // since some of the eigen-output is spurious and will be eliminated // upon verification, we have to aim to overshoot and then discard // unnecessary vectors later int overshoot = (int) ((double) clusters * OVERSHOOT_MULTIPLIER); DistributedLanczosSolver solver = new DistributedLanczosSolver(); LanczosState state = new LanczosState(L, clusters, DistributedLanczosSolver.getInitialVector(L)); Path lanczosSeqFiles = new Path(outputCalc, "eigenvectors-" + (System.nanoTime() & 0xFF)); solver.runJob(conf, state, overshoot, true, lanczosSeqFiles.toString()); // perform a verification EigenVerificationJob verifier = new EigenVerificationJob(); Path verifiedEigensPath = new Path(outputCalc, "eigenverifier"); verifier.runJob(conf, lanczosSeqFiles, L.getRowPath(), verifiedEigensPath, true, 1.0, clusters); Path cleanedEigens = verifier.getCleanedEigensPath(); DistributedRowMatrix W = new DistributedRowMatrix(cleanedEigens, new Path(cleanedEigens, "tmp"), clusters, numDims); W.setConf(depConf); DistributedRowMatrix Wtrans = W.transpose(); // DistributedRowMatrix Wt = W.transpose(); // next step: normalize the rows of Wt to unit length Path unitVectors = new Path(outputCalc, "unitvectors-" + (System.nanoTime() & 0xFF)); UnitVectorizerJob.runJob(Wtrans.getRowPath(), unitVectors); DistributedRowMatrix Wt = new DistributedRowMatrix(unitVectors, new Path(unitVectors, "tmp"), clusters, numDims); Wt.setConf(depConf); // Finally, perform k-means clustering on the rows of L (or W) // generate random initial clusters Path initialclusters = RandomSeedGenerator.buildRandom(conf, Wt.getRowPath(), new Path(output, Cluster.INITIAL_CLUSTERS_DIR), clusters, measure); // The output format is the same as the K-means output format. // TODO: Perhaps a conversion of the output format from points and clusters // in eigenspace to the original dataset. Currently, the user has to perform // the association step after this job finishes on their own. 
KMeansDriver.run(conf, Wt.getRowPath(), initialclusters, output, measure, convergenceDelta, maxIterations, true, 0.0, false); }
public static void run(Configuration conf, Path input, Path output, int numDims, int clusters, DistanceMeasure measure, double convergenceDelta, int maxIterations) throws IOException, InterruptedException, ClassNotFoundException { // create a few new Paths for temp files and transformations Path outputCalc = new Path(output, "calculations"); Path outputTmp = new Path(output, "temporary"); // Take in the raw CSV text file and split it ourselves, // creating our own SequenceFiles for the matrices to read later // (similar to the style of syntheticcontrol.canopy.InputMapper) Path affSeqFiles = new Path(outputCalc, "seqfile-" + (System.nanoTime() & 0xFF)); AffinityMatrixInputJob.runJob(input, affSeqFiles, numDims, numDims); // Next step: construct the affinity matrix using the newly-created // sequence files DistributedRowMatrix A = new DistributedRowMatrix(affSeqFiles, new Path(outputTmp, "afftmp-" + (System.nanoTime() & 0xFF)), numDims, numDims); Configuration depConf = new Configuration(conf); A.setConf(depConf); // Next step: construct the diagonal matrix D (represented as a vector) // and calculate the normalized Laplacian of the form: // L = D^(-0.5)AD^(-0.5) Vector D = MatrixDiagonalizeJob.runJob(affSeqFiles, numDims); DistributedRowMatrix L = VectorMatrixMultiplicationJob.runJob(affSeqFiles, D, new Path(outputCalc, "laplacian-" + (System.nanoTime() & 0xFF)), new Path(outputCalc, "laplacian-tmp-" + (System.nanoTime() & 0xFF))); L.setConf(depConf); // Next step: perform eigen-decomposition using LanczosSolver // since some of the eigen-output is spurious and will be eliminated // upon verification, we have to aim to overshoot and then discard // unnecessary vectors later int overshoot = (int) ((double) clusters * OVERSHOOT_MULTIPLIER); DistributedLanczosSolver solver = new DistributedLanczosSolver(); LanczosState state = new LanczosState(L, overshoot, DistributedLanczosSolver.getInitialVector(L)); Path lanczosSeqFiles = new Path(outputCalc, "eigenvectors-" + (System.nanoTime() & 0xFF)); solver.runJob(conf, state, overshoot, true, lanczosSeqFiles.toString()); // perform a verification EigenVerificationJob verifier = new EigenVerificationJob(); Path verifiedEigensPath = new Path(outputCalc, "eigenverifier"); verifier.runJob(conf, lanczosSeqFiles, L.getRowPath(), verifiedEigensPath, true, 1.0, clusters); Path cleanedEigens = verifier.getCleanedEigensPath(); DistributedRowMatrix W = new DistributedRowMatrix(cleanedEigens, new Path(cleanedEigens, "tmp"), clusters, numDims); W.setConf(depConf); DistributedRowMatrix Wtrans = W.transpose(); // DistributedRowMatrix Wt = W.transpose(); // next step: normalize the rows of Wt to unit length Path unitVectors = new Path(outputCalc, "unitvectors-" + (System.nanoTime() & 0xFF)); UnitVectorizerJob.runJob(Wtrans.getRowPath(), unitVectors); DistributedRowMatrix Wt = new DistributedRowMatrix(unitVectors, new Path(unitVectors, "tmp"), clusters, numDims); Wt.setConf(depConf); // Finally, perform k-means clustering on the rows of L (or W) // generate random initial clusters Path initialclusters = RandomSeedGenerator.buildRandom(conf, Wt.getRowPath(), new Path(output, Cluster.INITIAL_CLUSTERS_DIR), clusters, measure); // The output format is the same as the K-means output format. // TODO: Perhaps a conversion of the output format from points and clusters // in eigenspace to the original dataset. Currently, the user has to perform // the association step after this job finishes on their own. 
KMeansDriver.run(conf, Wt.getRowPath(), initialclusters, output, measure, convergenceDelta, maxIterations, true, 0.0, false); }
public void test_errorcode() throws Exception { ResultSet rs = null; Statement s = createStatement(); s.executeUpdate( "create table t(i int, s smallint)"); s.executeUpdate( "insert into t values (1,2)"); s.executeUpdate("insert into t values (1,2)"); s.executeUpdate("insert into t values (null,2)"); //-- parser error //-- bug 5701 assertStatementError("42X94",30000,s,"create table t(i nt, s smallint)"); //-- non-boolean where clause assertStatementError("42X19", 30000, s, "select * from t where i"); // -- invalid correlation name for "*" assertStatementError("42X10",30000, s, "select asdf.* from t"); //-- execution time error assertStatementError("22012",30000,s,"select i/0 from t"); // -- test ErrorMessages VTI rs = s.executeQuery( "select * from SYSCS_DIAG.error_Messages where " + "CAST(sql_state AS CHAR(5)) = '07000'"); String [][] expRS = new String [][] { {"07000", "At least one parameter to the current statement " + "is uninitialized.", "20000"} }; JDBC.assertFullResultSet(rs,expRS); // Test severe error messages. Existing messages should not change SQLState. // new ones can be added. rs = s.executeQuery("select * from SYSCS_DIAG.Error_messages where SEVERITY >= 40000 order by SQL_STATE"); //Utilities.showResultSet(rs); String [][] expectedRows = {{"08000","Connection closed by unknown interrupt.","40000"}, {"08001","A connection could not be established because the security token is larger than the maximum allowed by the network protocol.","40000"}, {"08001","A connection could not be established because the user id has a length of zero or is larger than the maximum allowed by the network protocol.","40000"}, {"08001","A connection could not be established because the password has a length of zero or is larger than the maximum allowed by the network protocol.","40000"}, {"08001","A connection could not be established because the external name (EXTNAM) has a length of zero or is larger than the maximum allowed by the network protocol.","40000"}, {"08001","A connection could not be established because the server name (SRVNAM) has a length of zero or is larger than the maximum allowed by the network protocol.","40000"}, {"08001","Required Derby DataSource property {0} not set.","40000"}, {"08001","{0} : Error connecting to server {1} on port {2} with message {3}.","40000"}, {"08001","SocketException: '{0}'","40000"}, {"08001","Unable to open stream on socket: '{0}'.","40000"}, {"08001","User id length ({0}) is outside the range of 1 to {1}.","40000"}, {"08001","Password length ({0}) is outside the range of 1 to {1}.","40000"}, {"08001","User id can not be null.","40000"}, {"08001","Password can not be null.","40000"}, {"08001","A connection could not be established because the database name '{0}' is larger than the maximum length allowed by the network protocol.","40000"}, {"08003","No current connection.","40000"}, {"08003","getConnection() is not valid on a closed PooledConnection.","40000"}, {"08003","Lob method called after connection was closed","40000"}, {"08003","The underlying physical connection is stale or closed.","40000"}, {"08004","Connection refused : {0}","40000"}, {"08004","Connection authentication failure occurred. Reason: {0}.","40000"}, {"08004","The connection was refused because the database {0} was not found.","40000"}, {"08004","Database connection refused.","40000"}, {"08004","User '{0}' cannot shut down database '{1}'. Only the database owner can perform this operation.","40000"}, {"08004","User '{0}' cannot (re)encrypt database '{1}'. 
Only the database owner can perform this operation.","40000"}, {"08004","User '{0}' cannot hard upgrade database '{1}'. Only the database owner can perform this operation.","40000"}, {"08004","Connection refused to database '{0}' because it is in replication slave mode.","40000"}, {"08004","User '{0}' cannot issue a replication operation on database '{1}'. Only the database owner can perform this operation.","40000"}, {"08004","Missing permission for user '{0}' to shutdown system [{1}].","40000"}, {"08004","Cannot check system permission to create database '{0}' [{1}].","40000"}, {"08004","Missing permission for user '{0}' to create database '{1}' [{2}].","40000"}, {"08004","Connection authentication failure occurred. Either the supplied credentials were invalid, or the database uses a password encryption scheme not compatible with the strong password substitution security mechanism. If this error started after upgrade, refer to the release note for DERBY-4483 for options.","40000"}, {"08006","An error occurred during connect reset and the connection has been terminated. See chained exceptions for details.","40000"}, {"08006","SocketException: '{0}'","40000"}, {"08006","A communications error has been detected: {0}.","40000"}, {"08006","An error occurred during a deferred connect reset and the connection has been terminated. See chained exceptions for details.","40000"}, {"08006","Insufficient data while reading from the network - expected a minimum of {0} bytes and received only {1} bytes. The connection has been terminated.","40000"}, {"08006","Attempt to fully materialize lob data that is too large for the JVM. The connection has been terminated.","40000"}, {"08006","A network protocol error was encountered and the connection has been terminated: {0}","40000"}, {"08006","Database '{0}' shutdown.","45000"}, {"08006","Database '{0}' dropped.","45000"}, {"0A000","The DRDA command {0} is not currently implemented. The connection has been terminated.","40000"}, {"57017","There is no available conversion for the source code page, {0}, to the target code page, {1}. The connection has been terminated.","40000"}, {"58009","Network protocol exception: only one of the VCM, VCS length can be greater than 0. The connection has been terminated.","40000"}, {"58009","The connection was terminated because the encoding is not supported.","40000"}, {"58009","Network protocol exception: actual code point, {0}, does not match expected code point, {1}. The connection has been terminated.","40000"}, {"58009","Network protocol exception: DDM collection contains less than 4 bytes of data. The connection has been terminated.","40000"}, {"58009","Network protocol exception: collection stack not empty at end of same id chain parse. The connection has been terminated.","40000"}, {"58009","Network protocol exception: DSS length not 0 at end of same id chain parse. The connection has been terminated.","40000"}, {"58009","Network protocol exception: DSS chained with same id at end of same id chain parse. The connection has been terminated.","40000"}, {"58009","Network protocol exception: end of stream prematurely reached while reading InputStream, parameter #{0}. The connection has been terminated.","40000"}, {"58009","Network protocol exception: invalid FDOCA LID. The connection has been terminated.","40000"}, {"58009","Network protocol exception: SECTKN was not returned. The connection has been terminated.","40000"}, {"58009","Network protocol exception: only one of NVCM, NVCS can be non-null. 
The connection has been terminated.","40000"}, {"58009","Network protocol exception: SCLDTA length, {0}, is invalid for RDBNAM. The connection has been terminated.","40000"}, {"58009","Network protocol exception: SCLDTA length, {0}, is invalid for RDBCOLID. The connection has been terminated.","40000"}, {"58009","Network protocol exception: SCLDTA length, {0}, is invalid for PKGID. The connection has been terminated.","40000"}, {"58009","Network protocol exception: PKGNAMCSN length, {0}, is invalid at SQLAM {1}. The connection has been terminated.","40000"}, {"58010","A network protocol error was encountered. A connection could not be established because the manager {0} at level {1} is not supported by the server. ","40000"}, {"58014","The DDM command 0x{0} is not supported. The connection has been terminated.","40000"}, {"58015","The DDM object 0x{0} is not supported. The connection has been terminated.","40000"}, {"58016","The DDM parameter 0x{0} is not supported. The connection has been terminated.","40000"}, {"58017","The DDM parameter value 0x{0} is not supported. An input host variable may not be within the range the server supports. The connection has been terminated.","40000"}, {"XBM01","Startup failed due to an exception. See next exception for details. ","45000"}, {"XBM02","Startup failed due to missing functionality for {0}. Please ensure your classpath includes the correct Derby software.","45000"}, {"XBM03","Supplied value '{0}' for collation attribute is invalid, expecting UCS_BASIC or TERRITORY_BASED.","45000"}, {"XBM04","Collator support not available from the JVM for the database's locale '{0}'.","45000"}, {"XBM05","Startup failed due to missing product version information for {0}.","45000"}, {"XBM06","Startup failed. An encrypted database cannot be accessed without the correct boot password. ","45000"}, {"XBM07","Startup failed. Boot password must be at least 8 bytes long.","45000"}, {"XBM08","Could not instantiate {0} StorageFactory class {1}.","45000"}, {"XBM0A","The database directory '{0}' exists. However, it does not contain the expected '{1}' file. Perhaps Derby was brought down in the middle of creating this database. You may want to delete this directory and try creating the database again.","45000"}, {"XBM0G","Failed to start encryption engine. Please make sure you are running Java 2 and have downloaded an encryption provider such as jce and put it in your class path. ","45000"}, {"XBM0H","Directory {0} cannot be created.","45000"}, {"XBM0I","Directory {0} cannot be removed.","45000"}, {"XBM0J","Directory {0} already exists.","45000"}, {"XBM0K","Unknown sub-protocol for database name {0}.","45000"}, {"XBM0L","Specified authentication scheme class {0} does implement the authentication interface {1}.","45000"}, {"XBM0M","Error creating instance of authentication scheme class {0}.","45000"}, {"XBM0N","JDBC Driver registration with java.sql.DriverManager failed. See next exception for details. ","45000"}, {"XBM0P","Service provider is read-only. Operation not permitted. ","45000"}, {"XBM0Q","File {0} not found. Please make sure that backup copy is the correct one and it is not corrupted.","45000"}, {"XBM0R","Unable to remove File {0}. ","45000"}, {"XBM0S","Unable to rename file '{0}' to '{1}'","45000"}, {"XBM0T","Ambiguous sub-protocol for database name {0}. 
","45000"}, {"XBM0X","Supplied territory description '{0}' is invalid, expecting ln[_CO[_variant]]\nln=lower-case two-letter ISO-639 language code, CO=upper-case two-letter ISO-3166 country codes, see java.util.Locale.","45000"}, {"XBM0Y","Backup database directory {0} not found. Please make sure that the specified backup path is right.","45000"}, {"XBM0Z","Unable to copy file '{0}' to '{1}'. Please make sure that there is enough space and permissions are correct. ","45000"}, {"XCW00","Unsupported upgrade from '{0}' to '{1}'.","45000"}, {"XJ004","Database '{0}' not found.","40000"}, {"XJ015","Derby system shutdown.","50000"}, {"XJ028","The URL '{0}' is not properly formed.","40000"}, {"XJ040","Failed to start database '{0}' with class loader {1}, see the next exception for details.","40000"}, {"XJ041","Failed to create database '{0}', see the next exception for details.","40000"}, {"XJ048","Conflicting boot attributes specified: {0}","40000"}, {"XJ049","Conflicting create attributes specified.","40000"}, {"XJ05B","JDBC attribute '{0}' has an invalid value '{1}', valid values are '{2}'.","40000"}, {"XJ081","Conflicting create/restore/recovery attributes specified.","40000"}, {"XJ213","The traceLevel connection property does not have a valid format for a number.","40000"}, {"XRE20","Failover performed successfully for database '{0}', the database has been shutdown.","45000"}, {"XSDB0","Unexpected exception on in-memory page {0}","45000"}, {"XSDB1","Unknown page format at page {0}","45000"}, {"XSDB2","Unknown container format at container {0} : {1}","45000"}, {"XSDB3","Container information cannot change once written: was {0}, now {1}","45000"}, {"XSDB4","Page {0} is at version {1}, the log file contains change version {2}, either there are log records of this page missing, or this page did not get written out to disk properly.","45000"}, {"XSDB5","Log has change record on page {0}, which is beyond the end of the container.","45000"}, {"XSDB6","Another instance of Derby may have already booted the database {0}.","45000"}, {"XSDB7","WARNING: Derby (instance {0}) is attempting to boot the database {1} even though Derby (instance {2}) may still be active. Only one instance of Derby should boot a database at a time. Severe and non-recoverable corruption can result and may have already occurred.","45000"}, {"XSDB8","WARNING: Derby (instance {0}) is attempting to boot the database {1} even though Derby (instance {2}) may still be active. Only one instance of Derby should boot a database at a time. Severe and non-recoverable corruption can result if 2 instances of Derby boot on the same database at the same time. The derby.database.forceDatabaseLock=true property has been set, so the database will not boot until the db.lck is no longer present. Normally this file is removed when the first instance of Derby to boot on the database exits, but it may be left behind in some shutdowns. It will be necessary to remove the file by hand in that case. It is important to verify that no other VM is accessing the database before deleting the db.lck file by hand.","45000"}, {"XSDB9","Stream container {0} is corrupt.","45000"}, {"XSDBA","Attempt to allocate object {0} failed.","45000"}, {"XSDBB", "Unknown page format at page {0}, page dump follows: {1} ", "45000"}, {"XSDBC", "Write of container information to page 0 of container {0} failed. See nested error for more information. 
", "45000"}, {"XSDG0","Page {0} could not be read from disk.","45000"}, {"XSDG1","Page {0} could not be written to disk, please check if the disk is full, or if a file system limit, such as a quota or a maximum file size, has been reached.","45000"}, {"XSDG2","Invalid checksum on Page {0}, expected={1}, on-disk version={2}, page dump follows: {3}","45000"}, {"XSDG3","Meta-data for {0} could not be accessed to {1} {2}","45000"}, {"XSDG5","Database is not in create mode when createFinished is called.","45000"}, {"XSDG6","Data segment directory not found in {0} backup during restore. Please make sure that backup copy is the right one and it is not corrupted.","45000"}, {"XSDG7","Directory {0} could not be removed during restore. Please make sure that permissions are correct.","45000"}, {"XSDG8","Unable to copy directory '{0}' to '{1}' during restore. Please make sure that there is enough space and permissions are correct. ","45000"}, {"XSDG9","Derby thread received an interrupt during a disk I/O operation, please check your application for the source of the interrupt.","45000"}, {"XSLA0","Cannot flush the log file to disk {0}.","45000"}, {"XSLA1","Log Record has been sent to the stream, but it cannot be applied to the store (Object {0}). This may cause recovery problems also.","45000"}, {"XSLA2","System will shutdown, got I/O Exception while accessing log file.","45000"}, {"XSLA3","Log Corrupted, has invalid data in the log stream.","45000"}, {"XSLA4","Cannot write to the log, most likely the log is full. Please delete unnecessary files. It is also possible that the file system is read only, or the disk has failed, or some other problems with the media. ","45000"}, {"XSLA5","Cannot read log stream for some reason to rollback transaction {0}.","45000"}, {"XSLA6","Cannot recover the database.","45000"}, {"XSLA7","Cannot redo operation {0} in the log.","45000"}, {"XSLA8","Cannot rollback transaction {0}, trying to compensate {1} operation with {2}","45000"}, {"XSLAA","The store has been marked for shutdown by an earlier exception.","45000"}, {"XSLAB","Cannot find log file {0}, please make sure your logDevice property is properly set with the correct path separator for your platform.","45000"}, {"XSLAC","Database at {0} have incompatible format with the current version of software, it may have been created by or upgraded by a later version.","45000"}, {"XSLAD","log Record at instant {2} in log file {3} corrupted. Expected log record length {0}, real length {1}.","45000"}, {"XSLAE","Control file at {0} cannot be written or updated.","45000"}, {"XSLAF","A Read Only database was created with dirty data buffers.","45000"}, {"XSLAH","A Read Only database is being updated.","45000"}, {"XSLAI","Cannot log the checkpoint log record","45000"}, {"XSLAJ","The logging system has been marked to shut down due to an earlier problem and will not allow any more operations until the system shuts down and restarts.","45000"}, {"XSLAK","Database has exceeded largest log file number {0}.","45000"}, {"XSLAL","log record size {2} exceeded the maximum allowable log file size {3}. Error encountered in log file {0}, position {1}.","45000"}, {"XSLAM","Cannot verify database format at {1} due to IOException.","45000"}, {"XSLAN","Database at {0} has an incompatible format with the current version of the software. The database was created by or upgraded by version {1}.","45000"}, {"XSLAO","Recovery failed unexpected problem {0}.","45000"}, {"XSLAP","Database at {0} is at version {1}. 
Beta databases cannot be upgraded,","45000"}, {"XSLAQ","cannot create log file at directory {0}.","45000"}, {"XSLAR","Unable to copy log file '{0}' to '{1}' during restore. Please make sure that there is enough space and permissions are correct. ","45000"}, {"XSLAS","Log directory {0} not found in backup during restore. Please make sure that backup copy is the correct one and it is not corrupted.","45000"}, {"XSLAT","The log directory '{0}' exists. The directory might belong to another database. Check that the location specified for the logDevice attribute is correct.","45000"}, {"XSTB0","An exception was thrown during transaction abort.","50000"}, {"XSTB2","Cannot log transaction changes, maybe trying to write to a read only database.","50000"}, {"XSTB3","Cannot abort transaction because the log manager is null, probably due to an earlier error.","50000"}, {"XSTB5","Creating database with logging disabled encountered unexpected problem.","50000"}, {"XSTB6","Cannot substitute a transaction table with another while one is already in use.","50000"}, {"XXXXX","Normal database session close.","40000"}, {"XRE04","Could not establish a connection to the peer of the replicated database '{0}' on address '{1}:{2}'.","40000"}, {"XRE04","Connection lost for replicated database '{0}'.","40000"}, {"XRE05","The log files on the master and slave are not in synch for replicated database '{0}'. The master log instant is {1}:{2}, whereas the slave log instant is {3}:{4}. This is FATAL for replication - replication will be stopped.","40000"}, {"XRE09","Cannot start replication slave mode for database '{0}'. The database has already been booted.","40000"}, {"XRE11","Could not perform operation '{0}' because the database '{1}' has not been booted.","40000"}, {"XRE21","Error occurred while performing failover for database '{0}', Failover attempt was aborted.","40000"}, {"XRE22","Replication master has already been booted for database '{0}'","40000"}, {"XRE41","Replication operation 'failover' or 'stopSlave' refused on the slave database because the connection with the master is working. Issue the 'failover' or 'stopMaster' operation on the master database instead.","40000"}, {"XRE42","Replicated database '{0}' shutdown.","40000"}}; JDBC.assertUnorderedResultSet(rs, expectedRows); s.executeUpdate("drop table t"); commit(); s.close(); }
public void test_errorcode() throws Exception { ResultSet rs = null; Statement s = createStatement(); s.executeUpdate( "create table t(i int, s smallint)"); s.executeUpdate( "insert into t values (1,2)"); s.executeUpdate("insert into t values (1,2)"); s.executeUpdate("insert into t values (null,2)"); //-- parser error //-- bug 5701 assertStatementError("42X94",30000,s,"create table t(i nt, s smallint)"); //-- non-boolean where clause assertStatementError("42X19", 30000, s, "select * from t where i"); // -- invalid correlation name for "*" assertStatementError("42X10",30000, s, "select asdf.* from t"); //-- execution time error assertStatementError("22012",30000,s,"select i/0 from t"); // -- test ErrorMessages VTI rs = s.executeQuery( "select * from SYSCS_DIAG.error_Messages where " + "CAST(sql_state AS CHAR(5)) = '07000'"); String [][] expRS = new String [][] { {"07000", "At least one parameter to the current statement " + "is uninitialized.", "20000"} }; JDBC.assertFullResultSet(rs,expRS); // Test severe error messages. Existing messages should not change SQLState. // new ones can be added. rs = s.executeQuery("select * from SYSCS_DIAG.Error_messages where SEVERITY >= 40000 order by SQL_STATE"); //Utilities.showResultSet(rs); String [][] expectedRows = {{"08000","Connection closed by unknown interrupt.","40000"}, {"08001","A connection could not be established because the security token is larger than the maximum allowed by the network protocol.","40000"}, {"08001","A connection could not be established because the user id has a length of zero or is larger than the maximum allowed by the network protocol.","40000"}, {"08001","A connection could not be established because the password has a length of zero or is larger than the maximum allowed by the network protocol.","40000"}, {"08001","A connection could not be established because the external name (EXTNAM) has a length of zero or is larger than the maximum allowed by the network protocol.","40000"}, {"08001","A connection could not be established because the server name (SRVNAM) has a length of zero or is larger than the maximum allowed by the network protocol.","40000"}, {"08001","Required Derby DataSource property {0} not set.","40000"}, {"08001","{0} : Error connecting to server {1} on port {2} with message {3}.","40000"}, {"08001","SocketException: '{0}'","40000"}, {"08001","Unable to open stream on socket: '{0}'.","40000"}, {"08001","User id length ({0}) is outside the range of 1 to {1}.","40000"}, {"08001","Password length ({0}) is outside the range of 1 to {1}.","40000"}, {"08001","User id can not be null.","40000"}, {"08001","Password can not be null.","40000"}, {"08001","A connection could not be established because the database name '{0}' is larger than the maximum length allowed by the network protocol.","40000"}, {"08003","No current connection.","40000"}, {"08003","getConnection() is not valid on a closed PooledConnection.","40000"}, {"08003","Lob method called after connection was closed","40000"}, {"08003","The underlying physical connection is stale or closed.","40000"}, {"08004","Connection refused : {0}","40000"}, {"08004","Connection authentication failure occurred. Reason: {0}.","40000"}, {"08004","The connection was refused because the database {0} was not found.","40000"}, {"08004","Database connection refused.","40000"}, {"08004","User '{0}' cannot shut down database '{1}'. Only the database owner can perform this operation.","40000"}, {"08004","User '{0}' cannot (re)encrypt database '{1}'. 
Only the database owner can perform this operation.","40000"}, {"08004","User '{0}' cannot hard upgrade database '{1}'. Only the database owner can perform this operation.","40000"}, {"08004","Connection refused to database '{0}' because it is in replication slave mode.","40000"}, {"08004","User '{0}' cannot issue a replication operation on database '{1}'. Only the database owner can perform this operation.","40000"}, {"08004","Missing permission for user '{0}' to shutdown system [{1}].","40000"}, {"08004","Cannot check system permission to create database '{0}' [{1}].","40000"}, {"08004","Missing permission for user '{0}' to create database '{1}' [{2}].","40000"}, {"08004","Connection authentication failure occurred. Either the supplied credentials were invalid, or the database uses a password encryption scheme not compatible with the strong password substitution security mechanism. If this error started after upgrade, refer to the release note for DERBY-4483 for options.","40000"}, {"08006","An error occurred during connect reset and the connection has been terminated. See chained exceptions for details.","40000"}, {"08006","SocketException: '{0}'","40000"}, {"08006","A communications error has been detected: {0}.","40000"}, {"08006","An error occurred during a deferred connect reset and the connection has been terminated. See chained exceptions for details.","40000"}, {"08006","Insufficient data while reading from the network - expected a minimum of {0} bytes and received only {1} bytes. The connection has been terminated.","40000"}, {"08006","Attempt to fully materialize lob data that is too large for the JVM. The connection has been terminated.","40000"}, {"08006","A network protocol error was encountered and the connection has been terminated: {0}","40000"}, {"08006","Database '{0}' shutdown.","45000"}, {"08006","Database '{0}' dropped.","45000"}, {"0A000","The DRDA command {0} is not currently implemented. The connection has been terminated.","40000"}, {"57017","There is no available conversion for the source code page, {0}, to the target code page, {1}. The connection has been terminated.","40000"}, {"58009","Network protocol exception: only one of the VCM, VCS length can be greater than 0. The connection has been terminated.","40000"}, {"58009","The connection was terminated because the encoding is not supported.","40000"}, {"58009","Network protocol exception: actual code point, {0}, does not match expected code point, {1}. The connection has been terminated.","40000"}, {"58009","Network protocol exception: DDM collection contains less than 4 bytes of data. The connection has been terminated.","40000"}, {"58009","Network protocol exception: collection stack not empty at end of same id chain parse. The connection has been terminated.","40000"}, {"58009","Network protocol exception: DSS length not 0 at end of same id chain parse. The connection has been terminated.","40000"}, {"58009","Network protocol exception: DSS chained with same id at end of same id chain parse. The connection has been terminated.","40000"}, {"58009","Network protocol exception: end of stream prematurely reached while reading InputStream, parameter #{0}. The connection has been terminated.","40000"}, {"58009","Network protocol exception: invalid FDOCA LID. The connection has been terminated.","40000"}, {"58009","Network protocol exception: SECTKN was not returned. The connection has been terminated.","40000"}, {"58009","Network protocol exception: only one of NVCM, NVCS can be non-null. 
The connection has been terminated.","40000"}, {"58009","Network protocol exception: SCLDTA length, {0}, is invalid for RDBNAM. The connection has been terminated.","40000"}, {"58009","Network protocol exception: SCLDTA length, {0}, is invalid for RDBCOLID. The connection has been terminated.","40000"}, {"58009","Network protocol exception: SCLDTA length, {0}, is invalid for PKGID. The connection has been terminated.","40000"}, {"58009","Network protocol exception: PKGNAMCSN length, {0}, is invalid at SQLAM {1}. The connection has been terminated.","40000"}, {"58010","A network protocol error was encountered. A connection could not be established because the manager {0} at level {1} is not supported by the server. ","40000"}, {"58014","The DDM command 0x{0} is not supported. The connection has been terminated.","40000"}, {"58015","The DDM object 0x{0} is not supported. The connection has been terminated.","40000"}, {"58016","The DDM parameter 0x{0} is not supported. The connection has been terminated.","40000"}, {"58017","The DDM parameter value 0x{0} is not supported. An input host variable may not be within the range the server supports. The connection has been terminated.","40000"}, {"XBM01","Startup failed due to an exception. See next exception for details. ","45000"}, {"XBM02","Startup failed due to missing functionality for {0}. Please ensure your classpath includes the correct Derby software.","45000"}, {"XBM03","Supplied value '{0}' for collation attribute is invalid, expecting UCS_BASIC or TERRITORY_BASED.","45000"}, {"XBM04","Collator support not available from the JVM for the database's locale '{0}'.","45000"}, {"XBM05","Startup failed due to missing product version information for {0}.","45000"}, {"XBM06","Startup failed. An encrypted database cannot be accessed without the correct boot password. ","45000"}, {"XBM07","Startup failed. Boot password must be at least 8 bytes long.","45000"}, {"XBM08","Could not instantiate {0} StorageFactory class {1}.","45000"}, {"XBM0A","The database directory '{0}' exists. However, it does not contain the expected '{1}' file. Perhaps Derby was brought down in the middle of creating this database. You may want to delete this directory and try creating the database again.","45000"}, {"XBM0G","Failed to start encryption engine. Please make sure you are running Java 2 and have downloaded an encryption provider such as jce and put it in your class path. ","45000"}, {"XBM0H","Directory {0} cannot be created.","45000"}, {"XBM0I","Directory {0} cannot be removed.","45000"}, {"XBM0J","Directory {0} already exists.","45000"}, {"XBM0K","Unknown sub-protocol for database name {0}.","45000"}, {"XBM0L","Specified authentication scheme class {0} does implement the authentication interface {1}.","45000"}, {"XBM0M","Error creating instance of authentication scheme class {0}.","45000"}, {"XBM0N","JDBC Driver registration with java.sql.DriverManager failed. See next exception for details. ","45000"}, {"XBM0P","Service provider is read-only. Operation not permitted. ","45000"}, {"XBM0Q","File {0} not found. Please make sure that backup copy is the correct one and it is not corrupted.","45000"}, {"XBM0R","Unable to remove File {0}. ","45000"}, {"XBM0S","Unable to rename file '{0}' to '{1}'","45000"}, {"XBM0T","Ambiguous sub-protocol for database name {0}. 
","45000"}, {"XBM0X","Supplied territory description '{0}' is invalid, expecting ln[_CO[_variant]]\nln=lower-case two-letter ISO-639 language code, CO=upper-case two-letter ISO-3166 country codes, see java.util.Locale.","45000"}, {"XBM0Y","Backup database directory {0} not found. Please make sure that the specified backup path is right.","45000"}, {"XBM0Z","Unable to copy file '{0}' to '{1}'. Please make sure that there is enough space and permissions are correct. ","45000"}, {"XCW00","Unsupported upgrade from '{0}' to '{1}'.","45000"}, {"XJ004","Database '{0}' not found.","40000"}, {"XJ015","Derby system shutdown.","50000"}, {"XJ028","The URL '{0}' is not properly formed.","40000"}, {"XJ040","Failed to start database '{0}' with class loader {1}, see the next exception for details.","40000"}, {"XJ041","Failed to create database '{0}', see the next exception for details.","40000"}, {"XJ048","Conflicting boot attributes specified: {0}","40000"}, {"XJ049","Conflicting create attributes specified.","40000"}, {"XJ05B","JDBC attribute '{0}' has an invalid value '{1}', valid values are '{2}'.","40000"}, {"XJ081","Conflicting create/restore/recovery attributes specified.","40000"}, {"XJ213","The traceLevel connection property does not have a valid format for a number.","40000"}, {"XRE20","Failover performed successfully for database '{0}', the database has been shutdown.","45000"}, {"XSDB0","Unexpected exception on in-memory page {0}","45000"}, {"XSDB1","Unknown page format at page {0}","45000"}, {"XSDB2","Unknown container format at container {0} : {1}","45000"}, {"XSDB3","Container information cannot change once written: was {0}, now {1}","45000"}, {"XSDB4","Page {0} is at version {1}, the log file contains change version {2}, either there are log records of this page missing, or this page did not get written out to disk properly.","45000"}, {"XSDB5","Log has change record on page {0}, which is beyond the end of the container.","45000"}, {"XSDB6","Another instance of Derby may have already booted the database {0}.","45000"}, {"XSDB7","WARNING: Derby (instance {0}) is attempting to boot the database {1} even though Derby (instance {2}) may still be active. Only one instance of Derby should boot a database at a time. Severe and non-recoverable corruption can result and may have already occurred.","45000"}, {"XSDB8","WARNING: Derby (instance {0}) is attempting to boot the database {1} even though Derby (instance {2}) may still be active. Only one instance of Derby should boot a database at a time. Severe and non-recoverable corruption can result if 2 instances of Derby boot on the same database at the same time. The derby.database.forceDatabaseLock=true property has been set, so the database will not boot until the db.lck is no longer present. Normally this file is removed when the first instance of Derby to boot on the database exits, but it may be left behind in some shutdowns. It will be necessary to remove the file by hand in that case. It is important to verify that no other VM is accessing the database before deleting the db.lck file by hand.","45000"}, {"XSDB9","Stream container {0} is corrupt.","45000"}, {"XSDBA","Attempt to allocate object {0} failed.","45000"}, {"XSDBB", "Unknown page format at page {0}, page dump follows: {1} ", "45000"}, {"XSDBC", "Write of container information to page 0 of container {0} failed. See nested error for more information. 
", "45000"}, {"XSDG0","Page {0} could not be read from disk.","45000"}, {"XSDG1","Page {0} could not be written to disk, please check if the disk is full, or if a file system limit, such as a quota or a maximum file size, has been reached.","45000"}, {"XSDG2","Invalid checksum on Page {0}, expected={1}, on-disk version={2}, page dump follows: {3}","45000"}, {"XSDG3","Meta-data for {0} could not be accessed to {1} {2}","45000"}, {"XSDG5","Database is not in create mode when createFinished is called.","45000"}, {"XSDG6","Data segment directory not found in {0} backup during restore. Please make sure that backup copy is the right one and it is not corrupted.","45000"}, {"XSDG7","Directory {0} could not be removed during restore. Please make sure that permissions are correct.","45000"}, {"XSDG8","Unable to copy directory '{0}' to '{1}' during restore. Please make sure that there is enough space and permissions are correct. ","45000"}, {"XSDG9","Derby thread received an interrupt during a disk I/O operation, please check your application for the source of the interrupt.","45000"}, {"XSLA0","Cannot flush the log file to disk {0}.","45000"}, {"XSLA1","Log Record has been sent to the stream, but it cannot be applied to the store (Object {0}). This may cause recovery problems also.","45000"}, {"XSLA2","System will shutdown, got I/O Exception while accessing log file.","45000"}, {"XSLA3","Log Corrupted, has invalid data in the log stream.","45000"}, {"XSLA4","Error encountered when attempting to write the transaction recovery log. Most likely the disk holding the recovery log is full. If the disk is full, the only way to proceed is to free up space on the disk by either expanding it or deleting files not related to Derby. It is also possible that the file system and/or disk where the Derby transaction log resides is read-only. The error can also be encountered if the disk or file system has failed.","45000"}, {"XSLA5","Cannot read log stream for some reason to rollback transaction {0}.","45000"}, {"XSLA6","Cannot recover the database.","45000"}, {"XSLA7","Cannot redo operation {0} in the log.","45000"}, {"XSLA8","Cannot rollback transaction {0}, trying to compensate {1} operation with {2}","45000"}, {"XSLAA","The store has been marked for shutdown by an earlier exception.","45000"}, {"XSLAB","Cannot find log file {0}, please make sure your logDevice property is properly set with the correct path separator for your platform.","45000"}, {"XSLAC","Database at {0} have incompatible format with the current version of software, it may have been created by or upgraded by a later version.","45000"}, {"XSLAD","log Record at instant {2} in log file {3} corrupted. Expected log record length {0}, real length {1}.","45000"}, {"XSLAE","Control file at {0} cannot be written or updated.","45000"}, {"XSLAF","A Read Only database was created with dirty data buffers.","45000"}, {"XSLAH","A Read Only database is being updated.","45000"}, {"XSLAI","Cannot log the checkpoint log record","45000"}, {"XSLAJ","The logging system has been marked to shut down due to an earlier problem and will not allow any more operations until the system shuts down and restarts.","45000"}, {"XSLAK","Database has exceeded largest log file number {0}.","45000"}, {"XSLAL","log record size {2} exceeded the maximum allowable log file size {3}. 
Error encountered in log file {0}, position {1}.","45000"}, {"XSLAM","Cannot verify database format at {1} due to IOException.","45000"}, {"XSLAN","Database at {0} has an incompatible format with the current version of the software. The database was created by or upgraded by version {1}.","45000"}, {"XSLAO","Recovery failed unexpected problem {0}.","45000"}, {"XSLAP","Database at {0} is at version {1}. Beta databases cannot be upgraded,","45000"}, {"XSLAQ","cannot create log file at directory {0}.","45000"}, {"XSLAR","Unable to copy log file '{0}' to '{1}' during restore. Please make sure that there is enough space and permissions are correct. ","45000"}, {"XSLAS","Log directory {0} not found in backup during restore. Please make sure that backup copy is the correct one and it is not corrupted.","45000"}, {"XSLAT","The log directory '{0}' exists. The directory might belong to another database. Check that the location specified for the logDevice attribute is correct.","45000"}, {"XSTB0","An exception was thrown during transaction abort.","50000"}, {"XSTB2","Cannot log transaction changes, maybe trying to write to a read only database.","50000"}, {"XSTB3","Cannot abort transaction because the log manager is null, probably due to an earlier error.","50000"}, {"XSTB5","Creating database with logging disabled encountered unexpected problem.","50000"}, {"XSTB6","Cannot substitute a transaction table with another while one is already in use.","50000"}, {"XXXXX","Normal database session close.","40000"}, {"XRE04","Could not establish a connection to the peer of the replicated database '{0}' on address '{1}:{2}'.","40000"}, {"XRE04","Connection lost for replicated database '{0}'.","40000"}, {"XRE05","The log files on the master and slave are not in synch for replicated database '{0}'. The master log instant is {1}:{2}, whereas the slave log instant is {3}:{4}. This is FATAL for replication - replication will be stopped.","40000"}, {"XRE09","Cannot start replication slave mode for database '{0}'. The database has already been booted.","40000"}, {"XRE11","Could not perform operation '{0}' because the database '{1}' has not been booted.","40000"}, {"XRE21","Error occurred while performing failover for database '{0}', Failover attempt was aborted.","40000"}, {"XRE22","Replication master has already been booted for database '{0}'","40000"}, {"XRE41","Replication operation 'failover' or 'stopSlave' refused on the slave database because the connection with the master is working. Issue the 'failover' or 'stopMaster' operation on the master database instead.","40000"}, {"XRE42","Replicated database '{0}' shutdown.","40000"}}; JDBC.assertUnorderedResultSet(rs, expectedRows); s.executeUpdate("drop table t"); commit(); s.close(); }
public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException { writer.writeStr(name, f.stringValue(), false); }
public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException { writer.writeStr(name, f.stringValue(), true); }
public void write(TextResponseWriter writer, String name, IndexableField field) throws IOException { writer.writeStr(name, field.stringValue(), false); }
public void write(TextResponseWriter writer, String name, IndexableField field) throws IOException { writer.writeStr(name, field.stringValue(), true); }
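The two pairs above differ only in the boolean passed as the third argument to writeStr (false vs. true). Assuming that flag controls whether the field value is escaped before being written (an assumption for illustration; these rows do not state the parameter's meaning), the following minimal, self-contained sketch shows why such a flag matters. The class and helpers are made up and are not Solr's TextResponseWriter.

// Minimal sketch of an escaping flag like the boolean toggled in the rows above.
// Names and behavior are illustrative only, not Solr's actual API.
public class EscapingWriterSketch {

  // Writes name=value; when 'escape' is true, XML-special characters are replaced
  // with entities so the value cannot break the surrounding markup.
  static String writeStr(String name, String val, boolean escape) {
    return name + "=" + (escape ? escapeXml(val) : val);
  }

  static String escapeXml(String s) {
    StringBuilder sb = new StringBuilder(s.length());
    for (int i = 0; i < s.length(); i++) {
      char c = s.charAt(i);
      switch (c) {
        case '&': sb.append("&amp;"); break;
        case '<': sb.append("&lt;"); break;
        case '>': sb.append("&gt;"); break;
        default:  sb.append(c);
      }
    }
    return sb.toString();
  }

  public static void main(String[] args) {
    String raw = "a < b & c";
    System.out.println(writeStr("f", raw, false)); // f=a < b & c   (raw value can break markup)
    System.out.println(writeStr("f", raw, true));  // f=a &lt; b &amp; c
  }
}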
public static IndexReader maybeWrapReader(IndexReader r) throws IOException { Random random = random(); if (rarely()) { // TODO: remove this, and fix those tests to wrap before putting slow around: final boolean wasOriginallyAtomic = r instanceof AtomicReader; for (int i = 0, c = random.nextInt(6)+1; i < c; i++) { switch(random.nextInt(5)) { case 0: r = SlowCompositeReaderWrapper.wrap(r); break; case 1: // will create no FC insanity in atomic case, as ParallelAtomicReader has own cache key: r = (r instanceof AtomicReader) ? new ParallelAtomicReader((AtomicReader) r) : new ParallelCompositeReader((CompositeReader) r); break; case 2: // Häckidy-Hick-Hack: a standard MultiReader will cause FC insanity, so we use // QueryUtils' reader with a fake cache key, so insanity checker cannot walk // along our reader: r = new FCInvisibleMultiReader(r); break; case 3: final AtomicReader ar = SlowCompositeReaderWrapper.wrap(r); final List<String> allFields = new ArrayList<String>(); for (FieldInfo fi : ar.getFieldInfos()) { allFields.add(fi.name); } Collections.shuffle(allFields, random); final int end = allFields.isEmpty() ? 0 : random.nextInt(allFields.size()); final Set<String> fields = new HashSet<String>(allFields.subList(0, end)); // will create no FC insanity as ParallelAtomicReader has own cache key: r = new ParallelAtomicReader( new FieldFilterAtomicReader(ar, fields, false), new FieldFilterAtomicReader(ar, fields, true) ); break; case 4: // Häckidy-Hick-Hack: a standard Reader will cause FC insanity, so we use // QueryUtils' reader with a fake cache key, so insanity checker cannot walk // along our reader: if (r instanceof AtomicReader) { r = new FCInvisibleMultiReader(new AssertingAtomicReader((AtomicReader)r)); } else if (r instanceof DirectoryReader) { r = new FCInvisibleMultiReader(new AssertingDirectoryReader((DirectoryReader)r)); } break; default: fail("should not get here"); } } if (wasOriginallyAtomic) { r = SlowCompositeReaderWrapper.wrap(r); } else if ((r instanceof CompositeReader) && !(r instanceof FCInvisibleMultiReader)) { // prevent cache insanity caused by e.g. ParallelCompositeReader, to fix we wrap one more time: r = new FCInvisibleMultiReader(r); } if (VERBOSE) { System.out.println("maybeWrapReader wrapped: " +r); } } return r; }
public static IndexReader maybeWrapReader(IndexReader r) throws IOException { Random random = random(); if (rarely()) { // TODO: remove this, and fix those tests to wrap before putting slow around: final boolean wasOriginallyAtomic = r instanceof AtomicReader; for (int i = 0, c = random.nextInt(6)+1; i < c; i++) { switch(random.nextInt(5)) { case 0: r = SlowCompositeReaderWrapper.wrap(r); break; case 1: // will create no FC insanity in atomic case, as ParallelAtomicReader has own cache key: r = (r instanceof AtomicReader) ? new ParallelAtomicReader((AtomicReader) r) : new ParallelCompositeReader((CompositeReader) r); break; case 2: // Häckidy-Hick-Hack: a standard MultiReader will cause FC insanity, so we use // QueryUtils' reader with a fake cache key, so insanity checker cannot walk // along our reader: r = new FCInvisibleMultiReader(r); break; case 3: final AtomicReader ar = SlowCompositeReaderWrapper.wrap(r); final List<String> allFields = new ArrayList<String>(); for (FieldInfo fi : ar.getFieldInfos()) { allFields.add(fi.name); } Collections.shuffle(allFields, random); final int end = allFields.isEmpty() ? 0 : random.nextInt(allFields.size()); final Set<String> fields = new HashSet<String>(allFields.subList(0, end)); // will create no FC insanity as ParallelAtomicReader has own cache key: r = new ParallelAtomicReader( new FieldFilterAtomicReader(ar, fields, false), new FieldFilterAtomicReader(ar, fields, true) ); break; case 4: // Häckidy-Hick-Hack: a standard Reader will cause FC insanity, so we use // QueryUtils' reader with a fake cache key, so insanity checker cannot walk // along our reader: if (r instanceof AtomicReader) { r = new FCInvisibleMultiReader(new AssertingAtomicReader((AtomicReader)r)); } else if (r instanceof DirectoryReader) { r = new FCInvisibleMultiReader((DirectoryReader)r); } break; default: fail("should not get here"); } } if (wasOriginallyAtomic) { r = SlowCompositeReaderWrapper.wrap(r); } else if ((r instanceof CompositeReader) && !(r instanceof FCInvisibleMultiReader)) { // prevent cache insanity caused by e.g. ParallelCompositeReader, to fix we wrap one more time: r = new FCInvisibleMultiReader(r); } if (VERBOSE) { System.out.println("maybeWrapReader wrapped: " +r); } } return r; }
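The two maybeWrapReader variants above differ only in case 4's DirectoryReader branch: one wraps the reader in AssertingDirectoryReader before handing it to FCInvisibleMultiReader, the other passes the DirectoryReader through unwrapped. As a rough illustration of that wrap-or-not decision, here is a small sketch of stacking an asserting decorator under a cache-hiding wrapper; Reader, BaseReader, AssertingReader and CacheInvisibleReader are stand-ins invented for this sketch, not Lucene classes.

// Illustrative decorator stacking, mirroring the shape of case 4 above.
interface Reader { int docCount(); }

class BaseReader implements Reader {
  public int docCount() { return 42; }
}

// Adds sanity checks around every call; useful in tests, pure overhead otherwise.
class AssertingReader implements Reader {
  private final Reader in;
  AssertingReader(Reader in) { this.in = in; }
  public int docCount() {
    int n = in.docCount();
    assert n >= 0 : "doc count must be non-negative";
    return n;
  }
}

// Hides the delegate behind its own identity, in the spirit of FCInvisibleMultiReader.
class CacheInvisibleReader implements Reader {
  private final Reader in;
  CacheInvisibleReader(Reader in) { this.in = in; }
  public int docCount() { return in.docCount(); }
}

public class WrapSketch {
  public static void main(String[] args) {
    Reader r = new BaseReader();
    boolean withAssertions = true; // the two rows above differ on exactly this choice
    Reader wrapped = new CacheInvisibleReader(withAssertions ? new AssertingReader(r) : r);
    System.out.println(wrapped.docCount());
  }
}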
protected void doFieldSortValues(ResponseBuilder rb, SolrIndexSearcher searcher) throws IOException { SolrQueryRequest req = rb.req; SolrQueryResponse rsp = rb.rsp; // The query cache doesn't currently store sort field values, and SolrIndexSearcher doesn't // currently have an option to return sort field values. Because of this, we // take the documents given and re-derive the sort values. boolean fsv = req.getParams().getBool(ResponseBuilder.FIELD_SORT_VALUES,false); if(fsv){ Sort sort = rb.getSortSpec().getSort(); SortField[] sortFields = sort==null ? new SortField[]{SortField.FIELD_SCORE} : sort.getSort(); NamedList sortVals = new NamedList(); // order is important for the sort fields Field field = new Field("dummy", "", Field.Store.YES, Field.Index.NO); // a dummy Field SolrIndexReader reader = searcher.getReader(); SolrIndexReader[] readers = reader.getLeafReaders(); SolrIndexReader subReader = reader; if (readers.length==1) { // if there is a single segment, use that subReader and avoid looking up each time subReader = readers[0]; readers=null; } int[] offsets = reader.getLeafOffsets(); for (SortField sortField: sortFields) { int type = sortField.getType(); if (type==SortField.SCORE || type==SortField.DOC) continue; FieldComparator comparator = null; FieldComparator comparators[] = (readers==null) ? null : new FieldComparator[readers.length]; String fieldname = sortField.getField(); FieldType ft = fieldname==null ? null : req.getSchema().getFieldTypeNoEx(fieldname); DocList docList = rb.getResults().docList; ArrayList<Object> vals = new ArrayList<Object>(docList.size()); DocIterator it = rb.getResults().docList.iterator(); int offset = 0; int idx = 0; while(it.hasNext()) { int doc = it.nextDoc(); if (readers != null) { idx = SolrIndexReader.readerIndex(doc, offsets); subReader = readers[idx]; offset = offsets[idx]; comparator = comparators[idx]; } if (comparator == null) { comparator = sortField.getComparator(1,0); comparator.setNextReader(subReader, offset); if (comparators != null) comparators[idx] = comparator; } doc -= offset; // adjust for what segment this is in comparator.copy(0, doc); Object val = comparator.value(0); // Sortable float, double, int, long types all just use a string // comparator. For these, we need to put the type into a readable // format. One reason for this is that XML can't represent all // string values (or even all unicode code points). // indexedToReadable() should be a no-op and should // thus be harmless anyway (for all current ways anyway) if (val instanceof String) { field.setValue((String)val); val = ft.toObject(field); } // Must do the same conversion when sorting by a // String field in Lucene, which returns the terms // data as BytesRef: if (val instanceof BytesRef) { field.setValue(((BytesRef)val).utf8ToString()); val = ft.toObject(field); } vals.add(val); } sortVals.add(fieldname, vals); } rsp.add("sort_values", sortVals); } }
protected void doFieldSortValues(ResponseBuilder rb, SolrIndexSearcher searcher) throws IOException { SolrQueryRequest req = rb.req; SolrQueryResponse rsp = rb.rsp; // The query cache doesn't currently store sort field values, and SolrIndexSearcher doesn't // currently have an option to return sort field values. Because of this, we // take the documents given and re-derive the sort values. boolean fsv = req.getParams().getBool(ResponseBuilder.FIELD_SORT_VALUES,false); if(fsv){ Sort sort = rb.getSortSpec().getSort(); SortField[] sortFields = sort==null ? new SortField[]{SortField.FIELD_SCORE} : sort.getSort(); NamedList sortVals = new NamedList(); // order is important for the sort fields Field field = new Field("dummy", "", Field.Store.YES, Field.Index.NO); // a dummy Field SolrIndexReader reader = searcher.getReader(); SolrIndexReader[] readers = reader.getLeafReaders(); SolrIndexReader subReader = reader; if (readers.length==1) { // if there is a single segment, use that subReader and avoid looking up each time subReader = readers[0]; readers=null; } int[] offsets = reader.getLeafOffsets(); for (SortField sortField: sortFields) { int type = sortField.getType(); if (type==SortField.SCORE || type==SortField.DOC) continue; FieldComparator comparator = null; FieldComparator comparators[] = (readers==null) ? null : new FieldComparator[readers.length]; String fieldname = sortField.getField(); FieldType ft = fieldname==null ? null : req.getSchema().getFieldTypeNoEx(fieldname); DocList docList = rb.getResults().docList; ArrayList<Object> vals = new ArrayList<Object>(docList.size()); DocIterator it = rb.getResults().docList.iterator(); int offset = 0; int idx = 0; while(it.hasNext()) { int doc = it.nextDoc(); if (readers != null) { idx = SolrIndexReader.readerIndex(doc, offsets); subReader = readers[idx]; offset = offsets[idx]; comparator = comparators[idx]; } if (comparator == null) { comparator = sortField.getComparator(1,0); comparator = comparator.setNextReader(subReader, offset); if (comparators != null) comparators[idx] = comparator; } doc -= offset; // adjust for what segment this is in comparator.copy(0, doc); Object val = comparator.value(0); // Sortable float, double, int, long types all just use a string // comparator. For these, we need to put the type into a readable // format. One reason for this is that XML can't represent all // string values (or even all unicode code points). // indexedToReadable() should be a no-op and should // thus be harmless anyway (for all current ways anyway) if (val instanceof String) { field.setValue((String)val); val = ft.toObject(field); } // Must do the same conversion when sorting by a // String field in Lucene, which returns the terms // data as BytesRef: if (val instanceof BytesRef) { field.setValue(((BytesRef)val).utf8ToString()); val = ft.toObject(field); } vals.add(val); } sortVals.add(fieldname, vals); } rsp.add("sort_values", sortVals); } }
public TopGroupSortCollector(ValueSource groupByVS, Map vsContext, Sort sort, Sort groupSort, int nGroups) throws IOException {
  super(groupByVS, vsContext, sort, nGroups);
  this.groupSort = groupSort;
}

void constructComparators(FieldComparator[] comparators, int[] reversed, SortField[] sortFields, int size) throws IOException {
  for (int i = 0; i < sortFields.length; i++) {
    SortField sortField = sortFields[i];
    reversed[i] = sortField.getReverse() ? -1 : 1;
    comparators[i] = sortField.getComparator(size, i);
    if (scorer != null) comparators[i].setScorer(scorer);
    if (reader != null) comparators[i].setNextReader(reader, docBase);
  }
}
public TopGroupSortCollector(ValueSource groupByVS, Map vsContext, Sort sort, Sort groupSort, int nGroups) throws IOException {
  super(groupByVS, vsContext, sort, nGroups);
  this.groupSort = groupSort;
}

void constructComparators(FieldComparator[] comparators, int[] reversed, SortField[] sortFields, int size) throws IOException {
  for (int i = 0; i < sortFields.length; i++) {
    SortField sortField = sortFields[i];
    reversed[i] = sortField.getReverse() ? -1 : 1;
    comparators[i] = sortField.getComparator(size, i);
    if (scorer != null) comparators[i].setScorer(scorer);
    if (reader != null) comparators[i] = comparators[i].setNextReader(reader, docBase);
  }
}
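This pair and the doFieldSortValues pair above share the same one-line difference: one version discards the return value of setNextReader, the other reassigns it. Since the rows that reassign evidently expect setNextReader to hand back the comparator to use for the next segment, dropping that return value leaves the caller holding a comparator bound to the wrong segment. A minimal sketch of that contract follows; SegmentComparator and PerSegmentComparator are made-up stand-ins, not Lucene's FieldComparator.

// Sketch of the "keep the returned instance" contract behind both fixes above.
interface SegmentComparator {
  SegmentComparator setNextReader(int docBase); // may return 'this' or a replacement
  int docBase();
}

class PerSegmentComparator implements SegmentComparator {
  private final int docBase;
  PerSegmentComparator(int docBase) { this.docBase = docBase; }
  // Returns a fresh comparator bound to the new segment instead of mutating this one.
  public SegmentComparator setNextReader(int docBase) { return new PerSegmentComparator(docBase); }
  public int docBase() { return docBase; }
}

public class SetNextReaderSketch {
  public static void main(String[] args) {
    SegmentComparator c = new PerSegmentComparator(0);

    c.setNextReader(100);            // return value dropped: 'c' is still bound to docBase 0
    System.out.println(c.docBase()); // 0

    c = c.setNextReader(100);        // reassigned: 'c' is now bound to the new segment
    System.out.println(c.docBase()); // 100
  }
}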