diff --git a/.history b/.history
index 209db6b195..7bbf31e478 100644
--- a/.history
+++ b/.history
@@ -1,2 +1,4 @@
 update
 reload
+projects
+exit
diff --git a/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala
index 7ab547cf1f..e1f879d99d 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala
@@ -4,21 +4,21 @@ import org.scalatest.BeforeAndAfterEach
 import akka.testkit.TestEvent._
 import akka.testkit.EventFilter
 import org.multiverse.api.latches.StandardLatch
-import java.util.concurrent.{ ScheduledFuture, ConcurrentLinkedQueue, CountDownLatch, TimeUnit }
+import java.util.concurrent.{ ConcurrentLinkedQueue, CountDownLatch, TimeUnit }
 import akka.testkit.AkkaSpec
+import org.jboss.netty.akka.util.{ Timeout ⇒ TimeOut }
 
 @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
 class SchedulerSpec extends AkkaSpec with BeforeAndAfterEach {
-  private val futures = new ConcurrentLinkedQueue[ScheduledFuture[AnyRef]]()
+  private val timeouts = new ConcurrentLinkedQueue[TimeOut]()
 
-  def collectFuture(f: ⇒ ScheduledFuture[AnyRef]): ScheduledFuture[AnyRef] = {
-    val future = f
-    futures.add(future)
-    future
+  def collectTimeout(t: TimeOut): TimeOut = {
+    timeouts.add(t)
+    t
   }
 
   override def afterEach {
-    while (futures.peek() ne null) { Option(futures.poll()).foreach(_.cancel(true)) }
+    while (timeouts.peek() ne null) { Option(timeouts.poll()).foreach(_.cancel()) }
   }
 
   "A Scheduler" must {
@@ -30,14 +30,14 @@ class SchedulerSpec extends AkkaSpec with BeforeAndAfterEach {
         def receive = { case Tick ⇒ countDownLatch.countDown() }
       })
       // run every 50 millisec
-      collectFuture(app.scheduler.schedule(tickActor, Tick, 0, 50, TimeUnit.MILLISECONDS))
+      collectTimeout(app.scheduler.schedule(tickActor, Tick, 0, 50, TimeUnit.MILLISECONDS))
 
       // after max 1 second it should be executed at least the 3 times already
       assert(countDownLatch.await(1, TimeUnit.SECONDS))
 
       val countDownLatch2 = new CountDownLatch(3)
 
-      collectFuture(app.scheduler.schedule(() ⇒ countDownLatch2.countDown(), 0, 50, TimeUnit.MILLISECONDS))
+      collectTimeout(app.scheduler.schedule(() ⇒ countDownLatch2.countDown(), 0, 50, TimeUnit.MILLISECONDS))
 
       // after max 1 second it should be executed at least the 3 times already
       assert(countDownLatch2.await(2, TimeUnit.SECONDS))
@@ -49,9 +49,10 @@ class SchedulerSpec extends AkkaSpec with BeforeAndAfterEach {
       val tickActor = actorOf(new Actor {
         def receive = { case Tick ⇒ countDownLatch.countDown() }
       })
+
       // run every 50 millisec
-      collectFuture(app.scheduler.scheduleOnce(tickActor, Tick, 50, TimeUnit.MILLISECONDS))
-      collectFuture(app.scheduler.scheduleOnce(() ⇒ countDownLatch.countDown(), 50, TimeUnit.MILLISECONDS))
+      collectTimeout(app.scheduler.scheduleOnce(tickActor, Tick, 50, TimeUnit.MILLISECONDS))
+      collectTimeout(app.scheduler.scheduleOnce(() ⇒ countDownLatch.countDown(), 50, TimeUnit.MILLISECONDS))
 
       // after 1 second the wait should fail
       assert(countDownLatch.await(2, TimeUnit.SECONDS) == false)
@@ -87,9 +88,10 @@ class SchedulerSpec extends AkkaSpec with BeforeAndAfterEach {
       })
 
       (1 to 10).foreach { i ⇒
-        val future = collectFuture(app.scheduler.scheduleOnce(actor, Ping, 1, TimeUnit.SECONDS))
-        future.cancel(true)
+        val timeout = collectTimeout(app.scheduler.scheduleOnce(actor, Ping, 1, TimeUnit.SECONDS))
+        timeout.cancel()
       }
+
       assert(ticks.await(3, TimeUnit.SECONDS) == false) //No counting down should've been made
     }
 
@@ -114,9 +116,9 @@ class SchedulerSpec extends AkkaSpec with BeforeAndAfterEach {
       })
       val actor = (supervisor ? props).as[ActorRef].get
 
-      collectFuture(app.scheduler.schedule(actor, Ping, 500, 500, TimeUnit.MILLISECONDS))
+      collectTimeout(app.scheduler.schedule(actor, Ping, 500, 500, TimeUnit.MILLISECONDS))
       // appx 2 pings before crash
-      collectFuture(app.scheduler.scheduleOnce(actor, Crash, 1000, TimeUnit.MILLISECONDS))
+      collectTimeout(app.scheduler.scheduleOnce(actor, Crash, 1000, TimeUnit.MILLISECONDS))
 
       assert(restartLatch.tryAwait(2, TimeUnit.SECONDS))
       // should be enough time for the ping countdown to recover and reach 6 pings
diff --git a/akka-actor/src/main/java/org/jboss/netty/akka/logging/AbstractInternalLogger.java b/akka-actor/src/main/java/org/jboss/netty/akka/logging/AbstractInternalLogger.java
new file mode 100644
index 0000000000..555ea86f09
--- /dev/null
+++ b/akka-actor/src/main/java/org/jboss/netty/akka/logging/AbstractInternalLogger.java
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ *
+ * Red Hat licenses this file to you under the Apache License, version 2.0
+ * (the "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.jboss.netty.akka.logging;
+
+/**
+ * A skeletal implementation of {@link InternalLogger}. This class implements
+ * all methods that have a {@link InternalLogLevel} parameter by default to call
+ * specific logger methods such as {@link #info(String)} or {@link #isInfoEnabled()}.
+ *
+ * @author The Netty Project
+ * @author Trustin Lee
+ * @version $Rev: 2080 $, $Date: 2010-01-26 18:04:19 +0900 (Tue, 26 Jan 2010) $
+ */
+public abstract class AbstractInternalLogger implements InternalLogger {
+
+    /**
+     * Creates a new instance.
+     */
+    protected AbstractInternalLogger() {
+        super();
+    }
+
+    public boolean isEnabled(InternalLogLevel level) {
+        switch (level) {
+        case DEBUG:
+            return isDebugEnabled();
+        case INFO:
+            return isInfoEnabled();
+        case WARN:
+            return isWarnEnabled();
+        case ERROR:
+            return isErrorEnabled();
+        default:
+            throw new Error();
+        }
+    }
+
+    public void log(InternalLogLevel level, String msg, Throwable cause) {
+        switch (level) {
+        case DEBUG:
+            debug(msg, cause);
+            break;
+        case INFO:
+            info(msg, cause);
+            break;
+        case WARN:
+            warn(msg, cause);
+            break;
+        case ERROR:
+            error(msg, cause);
+            break;
+        default:
+            throw new Error();
+        }
+    }
+
+    public void log(InternalLogLevel level, String msg) {
+        switch (level) {
+        case DEBUG:
+            debug(msg);
+            break;
+        case INFO:
+            info(msg);
+            break;
+        case WARN:
+            warn(msg);
+            break;
+        case ERROR:
+            error(msg);
+            break;
+        default:
+            throw new Error();
+        }
+    }
+}
diff --git a/akka-actor/src/main/java/org/jboss/netty/akka/logging/InternalLogLevel.java b/akka-actor/src/main/java/org/jboss/netty/akka/logging/InternalLogLevel.java
new file mode 100644
index 0000000000..c9963d26d6
--- /dev/null
+++ b/akka-actor/src/main/java/org/jboss/netty/akka/logging/InternalLogLevel.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ *
+ * Red Hat licenses this file to you under the Apache License, version 2.0
+ * (the "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.jboss.netty.akka.logging;
+
+/**
+ * The log level that {@link InternalLogger} can log at.
+ *
+ * @author The Netty Project
+ * @author Trustin Lee
+ * @version $Rev: 2080 $, $Date: 2010-01-26 18:04:19 +0900 (Tue, 26 Jan 2010) $
+ */
+public enum InternalLogLevel {
+    /**
+     * 'DEBUG' log level.
+     */
+    DEBUG,
+    /**
+     * 'INFO' log level.
+     */
+    INFO,
+    /**
+     * 'WARN' log level.
+     */
+    WARN,
+    /**
+     * 'ERROR' log level.
+     */
+    ERROR;
+}
diff --git a/akka-actor/src/main/java/org/jboss/netty/akka/logging/InternalLogger.java b/akka-actor/src/main/java/org/jboss/netty/akka/logging/InternalLogger.java
new file mode 100644
index 0000000000..19d3373e6c
--- /dev/null
+++ b/akka-actor/src/main/java/org/jboss/netty/akka/logging/InternalLogger.java
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ *
+ * Red Hat licenses this file to you under the Apache License, version 2.0
+ * (the "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.jboss.netty.akka.logging;
+
+/**
+ * Internal-use-only logger used by Netty. DO NOT
+ * access this class outside of Netty.
+ *
+ * @author The Netty Project
+ * @author Trustin Lee
+ *
+ * @version $Rev: 2080 $, $Date: 2010-01-26 18:04:19 +0900 (Tue, 26 Jan 2010) $
+ */
+public interface InternalLogger {
+    /**
+     * Returns {@code true} if a DEBUG level message is logged.
+     */
+    boolean isDebugEnabled();
+
+    /**
+     * Returns {@code true} if an INFO level message is logged.
+     */
+    boolean isInfoEnabled();
+
+    /**
+     * Returns {@code true} if a WARN level message is logged.
+     */
+    boolean isWarnEnabled();
+
+    /**
+     * Returns {@code true} if an ERROR level message is logged.
+     */
+    boolean isErrorEnabled();
+
+    /**
+     * Returns {@code true} if the specified log level message is logged.
+     */
+    boolean isEnabled(InternalLogLevel level);
+
+    /**
+     * Logs a DEBUG level message.
+     */
+    void debug(String msg);
+
+    /**
+     * Logs a DEBUG level message.
+     */
+    void debug(String msg, Throwable cause);
+
+    /**
+     * Logs an INFO level message.
+     */
+    void info(String msg);
+
+    /**
+     * Logs an INFO level message.
+     */
+    void info(String msg, Throwable cause);
+
+    /**
+     * Logs a WARN level message.
+     */
+    void warn(String msg);
+
+    /**
+     * Logs a WARN level message.
+     */
+    void warn(String msg, Throwable cause);
+
+    /**
+     * Logs an ERROR level message.
+     */
+    void error(String msg);
+
+    /**
+     * Logs an ERROR level message.
+     */
+    void error(String msg, Throwable cause);
+
+    /**
+     * Logs a message.
+     */
+    void log(InternalLogLevel level, String msg);
+
+    /**
+     * Logs a message.
+     */
+    void log(InternalLogLevel level, String msg, Throwable cause);
+}
diff --git a/akka-actor/src/main/java/org/jboss/netty/akka/logging/InternalLoggerFactory.java b/akka-actor/src/main/java/org/jboss/netty/akka/logging/InternalLoggerFactory.java
new file mode 100644
index 0000000000..bda537b35b
--- /dev/null
+++ b/akka-actor/src/main/java/org/jboss/netty/akka/logging/InternalLoggerFactory.java
@@ -0,0 +1,155 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ *
+ * Red Hat licenses this file to you under the Apache License, version 2.0
+ * (the "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.jboss.netty.akka.logging;
+
+import org.jboss.netty.akka.util.internal.StackTraceSimplifier;
+
+/**
+ * Creates an {@link InternalLogger} or changes the default factory
+ * implementation. This factory allows you to choose what logging framework
+ * Netty should use. The default factory is {@link JdkLoggerFactory}.
+ * You can change it to your preferred logging framework before other Netty
+ * classes are loaded:
+ *
+ * {@link org.jboss.netty.akka.logging.InternalLoggerFactory}.setDefaultFactory(new {@link Log4JLoggerFactory}());
+ *
+ * Please note that the new default factory is effective only for the classes
+ * which were loaded after the default factory is changed. Therefore,
+ * {@link #setDefaultFactory(org.jboss.netty.akka.logging.InternalLoggerFactory)} should be called as early
+ * as possible and shouldn't be called more than once.
+ *
+ * @author The Netty Project
+ * @author Trustin Lee
+ *
+ * @version $Rev: 2122 $, $Date: 2010-02-02 11:00:04 +0900 (Tue, 02 Feb 2010) $
+ *
+ * @apiviz.landmark
+ * @apiviz.has org.jboss.netty.logging.InternalLogger oneway - - creates
+ */
+public abstract class InternalLoggerFactory {
+ private static volatile InternalLoggerFactory defaultFactory = new JdkLoggerFactory();
+
+ static {
+ // Load the dependent classes in advance to avoid the case where
+ // the VM fails to load the required classes because of too many open
+ // files.
+ StackTraceSimplifier.simplify(new Exception());
+ }
+
+ /**
+ * Returns the default factory. The initial default factory is
+ * {@link JdkLoggerFactory}.
+ */
+ public static InternalLoggerFactory getDefaultFactory() {
+ return defaultFactory;
+ }
+
+ /**
+ * Changes the default factory.
+ */
+ public static void setDefaultFactory(InternalLoggerFactory defaultFactory) {
+ if (defaultFactory == null) {
+ throw new NullPointerException("defaultFactory");
+ }
+ InternalLoggerFactory.defaultFactory = defaultFactory;
+ }
+
+ /**
+ * Creates a new logger instance with the name of the specified class.
+ */
+ public static InternalLogger getInstance(Class<?> clazz) {
+ return getInstance(clazz.getName());
+ }
+
+ /**
+ * Creates a new logger instance with the specified name.
+ */
+ public static InternalLogger getInstance(String name) {
+ final InternalLogger logger = getDefaultFactory().newInstance(name);
+ return new InternalLogger() {
+
+ public void debug(String msg) {
+ logger.debug(msg);
+ }
+
+ public void debug(String msg, Throwable cause) {
+ StackTraceSimplifier.simplify(cause);
+ logger.debug(msg, cause);
+ }
+
+ public void error(String msg) {
+ logger.error(msg);
+ }
+
+ public void error(String msg, Throwable cause) {
+ StackTraceSimplifier.simplify(cause);
+ logger.error(msg, cause);
+ }
+
+ public void info(String msg) {
+ logger.info(msg);
+ }
+
+ public void info(String msg, Throwable cause) {
+ StackTraceSimplifier.simplify(cause);
+ logger.info(msg, cause);
+ }
+
+ public boolean isDebugEnabled() {
+ return logger.isDebugEnabled();
+ }
+
+ public boolean isErrorEnabled() {
+ return logger.isErrorEnabled();
+ }
+
+ public boolean isInfoEnabled() {
+ return logger.isInfoEnabled();
+ }
+
+ public boolean isWarnEnabled() {
+ return logger.isWarnEnabled();
+ }
+
+ public void warn(String msg) {
+ logger.warn(msg);
+ }
+
+ public void warn(String msg, Throwable cause) {
+ StackTraceSimplifier.simplify(cause);
+ logger.warn(msg, cause);
+ }
+
+ public boolean isEnabled(InternalLogLevel level) {
+ return logger.isEnabled(level);
+ }
+
+ public void log(InternalLogLevel level, String msg) {
+ logger.log(level, msg);
+ }
+
+ public void log(InternalLogLevel level, String msg, Throwable cause) {
+ StackTraceSimplifier.simplify(cause);
+ logger.log(level, msg, cause);
+ }
+ };
+ }
+
+ /**
+ * Creates a new logger instance with the specified name.
+ */
+ public abstract InternalLogger newInstance(String name);
+}
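The factory above is the single switch point for the repackaged logging code: a backend is installed once with setDefaultFactory, and per-name loggers are then handed out by getInstance, wrapped so that Throwable stack traces are run through StackTraceSimplifier. A minimal usage sketch, not part of this patch (the class name LoggingSetupExample is invented for illustration; JdkLoggerFactory is already the initial default, so setting it below only demonstrates the call that would swap in another backend):

import org.jboss.netty.akka.logging.InternalLogger;
import org.jboss.netty.akka.logging.InternalLoggerFactory;
import org.jboss.netty.akka.logging.JdkLoggerFactory;

public class LoggingSetupExample {
    public static void main(String[] args) {
        // Pick the backend as early as possible, and only once (see the javadoc above).
        // JdkLoggerFactory is already the default; another factory would be installed here.
        InternalLoggerFactory.setDefaultFactory(new JdkLoggerFactory());

        // Loggers come from whatever factory is currently installed.
        InternalLogger log = InternalLoggerFactory.getInstance(LoggingSetupExample.class);
        log.info("timer subsystem initialised");
        if (log.isDebugEnabled()) {
            log.debug("extra detail", new IllegalStateException("example failure"));
        }
    }
}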
diff --git a/akka-actor/src/main/java/org/jboss/netty/akka/logging/JdkLogger.java b/akka-actor/src/main/java/org/jboss/netty/akka/logging/JdkLogger.java
new file mode 100644
index 0000000000..56d68e74a0
--- /dev/null
+++ b/akka-actor/src/main/java/org/jboss/netty/akka/logging/JdkLogger.java
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ *
+ * Red Hat licenses this file to you under the Apache License, version 2.0
+ * (the "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.jboss.netty.akka.logging;
+
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+/**
+ * java.util.logging
+ * logger.
+ *
+ * @author The Netty Project
+ * @author Trustin Lee
+ *
+ * @version $Rev: 2080 $, $Date: 2010-01-26 18:04:19 +0900 (Tue, 26 Jan 2010) $
+ *
+ */
+class JdkLogger extends AbstractInternalLogger {
+
+ private final Logger logger;
+ private final String loggerName;
+
+ JdkLogger(Logger logger, String loggerName) {
+ this.logger = logger;
+ this.loggerName = loggerName;
+ }
+
+ public void debug(String msg) {
+ logger.logp(Level.FINE, loggerName, null, msg);
+ }
+
+ public void debug(String msg, Throwable cause) {
+ logger.logp(Level.FINE, loggerName, null, msg, cause);
+ }
+
+ public void error(String msg) {
+ logger.logp(Level.SEVERE, loggerName, null, msg);
+ }
+
+ public void error(String msg, Throwable cause) {
+ logger.logp(Level.SEVERE, loggerName, null, msg, cause);
+ }
+
+ public void info(String msg) {
+ logger.logp(Level.INFO, loggerName, null, msg);
+ }
+
+ public void info(String msg, Throwable cause) {
+ logger.logp(Level.INFO, loggerName, null, msg, cause);
+ }
+
+ public boolean isDebugEnabled() {
+ return logger.isLoggable(Level.FINE);
+ }
+
+ public boolean isErrorEnabled() {
+ return logger.isLoggable(Level.SEVERE);
+ }
+
+ public boolean isInfoEnabled() {
+ return logger.isLoggable(Level.INFO);
+ }
+
+ public boolean isWarnEnabled() {
+ return logger.isLoggable(Level.WARNING);
+ }
+
+ public void warn(String msg) {
+ logger.logp(Level.WARNING, loggerName, null, msg);
+ }
+
+ public void warn(String msg, Throwable cause) {
+ logger.logp(Level.WARNING, loggerName, null, msg, cause);
+ }
+
+ @Override
+ public String toString() {
+ return loggerName;
+ }
+}
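JdkLogger forwards everything to java.util.logging through Logger.logp, mapping debug/info/warn/error onto Level.FINE/INFO/WARNING/SEVERE and answering the is*Enabled() checks with Logger.isLoggable. A short sketch of the practical consequence (assumptions: the default JdkLoggerFactory is still installed, and the logger name and class below are only illustrative): debug() output stays invisible until both the java.util.logging logger and one of its handlers accept FINE.

import java.util.logging.ConsoleHandler;
import java.util.logging.Level;
import java.util.logging.Logger;

import org.jboss.netty.akka.logging.InternalLogger;
import org.jboss.netty.akka.logging.InternalLoggerFactory;

public class JdkLevelExample {
    public static void main(String[] args) {
        String name = "org.jboss.netty.akka.util.HashedWheelTimer";

        // Configure the java.util.logging side: logger and handler both at FINE,
        // otherwise the FINE records produced by JdkLogger.debug() are dropped.
        Logger julLogger = Logger.getLogger(name);
        ConsoleHandler handler = new ConsoleHandler();
        handler.setLevel(Level.FINE);
        julLogger.addHandler(handler);
        julLogger.setLevel(Level.FINE);

        // The wrapper now reports isDebugEnabled() == true and debug() is emitted at FINE.
        InternalLogger log = InternalLoggerFactory.getInstance(name);
        System.out.println("debug enabled: " + log.isDebugEnabled());
        log.debug("tick duration adjusted");
    }
}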
diff --git a/akka-actor/src/main/java/org/jboss/netty/akka/logging/JdkLoggerFactory.java b/akka-actor/src/main/java/org/jboss/netty/akka/logging/JdkLoggerFactory.java
new file mode 100644
index 0000000000..d4be45bf9d
--- /dev/null
+++ b/akka-actor/src/main/java/org/jboss/netty/akka/logging/JdkLoggerFactory.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ *
+ * Red Hat licenses this file to you under the Apache License, version 2.0
+ * (the "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.jboss.netty.akka.logging;
+
+
+/**
+ * Logger factory which creates a
+ * java.util.logging
+ * logger.
+ *
+ * @author The Netty Project
+ * @author Trustin Lee
+ *
+ * @version $Rev: 2080 $, $Date: 2010-01-26 18:04:19 +0900 (Tue, 26 Jan 2010) $
+ *
+ */
+public class JdkLoggerFactory extends InternalLoggerFactory {
+
+ @Override
+ public InternalLogger newInstance(String name) {
+ final java.util.logging.Logger logger =
+ java.util.logging.Logger.getLogger(name);
+ return new JdkLogger(logger, name);
+ }
+}
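JdkLoggerFactory also shows the whole extension surface: a concrete factory only has to implement newInstance(String), and AbstractInternalLogger fills in the InternalLogLevel-based methods. A hypothetical alternative backend, sketched under the assumption that it would be installed with InternalLoggerFactory.setDefaultFactory before any logger is requested (the package and class names below are invented for the example):

package org.example.logging;

import org.jboss.netty.akka.logging.AbstractInternalLogger;
import org.jboss.netty.akka.logging.InternalLogger;
import org.jboss.netty.akka.logging.InternalLoggerFactory;

public class StderrLoggerFactory extends InternalLoggerFactory {

    @Override
    public InternalLogger newInstance(String name) {
        return new StderrLogger(name);
    }

    // Invented logger that prints INFO and above to stderr and drops DEBUG.
    private static final class StderrLogger extends AbstractInternalLogger {
        private final String name;

        StderrLogger(String name) {
            this.name = name;
        }

        public boolean isDebugEnabled() { return false; }
        public boolean isInfoEnabled()  { return true; }
        public boolean isWarnEnabled()  { return true; }
        public boolean isErrorEnabled() { return true; }

        public void debug(String msg)                  { /* suppressed */ }
        public void debug(String msg, Throwable cause) { /* suppressed */ }
        public void info(String msg)                   { print("INFO", msg, null); }
        public void info(String msg, Throwable cause)  { print("INFO", msg, cause); }
        public void warn(String msg)                   { print("WARN", msg, null); }
        public void warn(String msg, Throwable cause)  { print("WARN", msg, cause); }
        public void error(String msg)                  { print("ERROR", msg, null); }
        public void error(String msg, Throwable cause) { print("ERROR", msg, cause); }

        private void print(String level, String msg, Throwable cause) {
            System.err.println(level + " [" + name + "] " + msg);
            if (cause != null) {
                cause.printStackTrace(System.err);
            }
        }
    }
}

Because the wrapper returned by InternalLoggerFactory.getInstance delegates the is*Enabled() checks to this logger, returning false from isDebugEnabled() also short-circuits callers that guard their debug() calls.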
diff --git a/akka-actor/src/main/java/org/jboss/netty/akka/util/DebugUtil.java b/akka-actor/src/main/java/org/jboss/netty/akka/util/DebugUtil.java
new file mode 100644
index 0000000000..6038c8decd
--- /dev/null
+++ b/akka-actor/src/main/java/org/jboss/netty/akka/util/DebugUtil.java
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ *
+ * Red Hat licenses this file to you under the Apache License, version 2.0
+ * (the "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.jboss.netty.akka.util;
+
+import org.jboss.netty.akka.util.internal.SystemPropertyUtil;
+
+/**
+ * Determines if Netty is running in a debug mode or not. Please note that
+ * this is not a Java debug mode. You can enable Netty debug mode by
+ * specifying the {@code "org.jboss.netty.debug"} system property (e.g.
+ * {@code java -Dorg.jboss.netty.debug ...})
+ *
+ * If debug mode is disabled (default), the stack trace of the exceptions are
+ * compressed to help debugging a user application.
+ *
+ * If debug mode is enabled, the stack trace of the exceptions raised in
+ * {@link ChannelPipeline} or {@link ChannelSink} are retained as it is to help
+ * debugging Netty.
+ *
+ * @author The Netty Project
+ * @author Trustin Lee
+ *
+ * @version $Rev: 2080 $, $Date: 2010-01-26 18:04:19 +0900 (Tue, 26 Jan 2010) $
+ */
+public class DebugUtil {
+
+    /**
+     * Returns {@code true} if and only if Netty debug mode is enabled.
+     */
+    public static boolean isDebugEnabled() {
+        String value;
+        try {
+            value = SystemPropertyUtil.get("org.jboss.netty.debug");
+        } catch (Exception e) {
+            value = null;
+        }
+
+        if (value == null) {
+            return false;
+        }
+
+        value = value.trim().toUpperCase();
+        return !value.startsWith("N") &&
+               !value.startsWith("F") &&
+               !value.equals("0");
+    }
+
+    private DebugUtil() {
+        // Unused
+    }
+}
diff --git a/akka-actor/src/main/java/org/jboss/netty/akka/util/HashedWheelTimer.java b/akka-actor/src/main/java/org/jboss/netty/akka/util/HashedWheelTimer.java
new file mode 100644
index 0000000000..bc6e6d9829
--- /dev/null
+++ b/akka-actor/src/main/java/org/jboss/netty/akka/util/HashedWheelTimer.java
@@ -0,0 +1,555 @@
+/*
+ * Copyright 2009 Red Hat, Inc.
+ *
+ * Red Hat licenses this file to you under the Apache License, version 2.0
+ * (the "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.jboss.netty.akka.util;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import org.jboss.netty.akka.logging.InternalLogger;
+import org.jboss.netty.akka.logging.InternalLoggerFactory;
+import org.jboss.netty.akka.util.internal.ConcurrentIdentityHashMap;
+import org.jboss.netty.akka.util.internal.ReusableIterator;
+import org.jboss.netty.akka.util.internal.SharedResourceMisuseDetector;
+
+/**
+ * A {@link Timer} optimized for approximated I/O timeout scheduling.
+ *
+ *
+ * You can increase or decrease the accuracy of the execution timing by
+ * specifying smaller or larger tick duration in the constructor. In most
+ * network applications, I/O timeout does not need to be accurate. Therefore,
+ * the default tick duration is 100 milliseconds and you will not need to try
+ * different configurations in most cases.
+ *
+ *
+ * More formally, if this map contains a mapping from a key {@code k} to
+ * a value {@code v} such that {@code key.equals(k)}, then this method
+ * returns {@code v}; otherwise it returns {@code null}. (There can be at
+ * most one such mapping.)
+ *
+ * @throws NullPointerException if the specified key is null
+ */
+ @Override
+ public V get(Object key) {
+ int hash = hashOf(key);
+ return segmentFor(hash).get(key, hash);
+ }
+
+ /**
+ * Tests if the specified object is a key in this table.
+ *
+ * @param key possible key
+ * @return true if and only if the specified object is a key in
+ * this table, as determined by the equals method;
+ * false otherwise.
+ * @throws NullPointerException if the specified key is null
+ */
+ @Override
+ public boolean containsKey(Object key) {
+ int hash = hashOf(key);
+ return segmentFor(hash).containsKey(key, hash);
+ }
+
+ /**
+ * Returns true if this map maps one or more keys to the specified
+ * value. Note: This method requires a full internal traversal of the hash
+ * table, and so is much slower than method containsKey.
+ *
+ * @param value value whose presence in this map is to be tested
+ * @return true if this map maps one or more keys to the specified
+ * value
+ * @throws NullPointerException if the specified value is null
+ */
+
+ @Override
+ public boolean containsValue(Object value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+
+ // See explanation of modCount use above
+
+ final Segment<K, V>[] segments = this.segments;
+ * The value can be retrieved by calling the get method with a
+ * key that is equal to the original key.
+ *
+ * @param key key with which the specified value is to be associated
+ * @param value value to be associated with the specified key
+ * @return the previous value associated with key, or null
+ * if there was no mapping for key
+ * @throws NullPointerException if the specified key or value is null
+ */
+ @Override
+ public V put(K key, V value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ int hash = hashOf(key);
+ return segmentFor(hash).put(key, hash, value, false);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @return the previous value associated with the specified key, or
+ * null if there was no mapping for the key
+ * @throws NullPointerException if the specified key or value is null
+ */
+ public V putIfAbsent(K key, V value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ int hash = hashOf(key);
+ return segmentFor(hash).put(key, hash, value, true);
+ }
+
+ /**
+ * Copies all of the mappings from the specified map to this one. These
+ * mappings replace any mappings that this map had for any of the keys
+ * currently in the specified map.
+ *
+ * @param m mappings to be stored in this map
+ */
+ @Override
+ public void putAll(Map<? extends K, ? extends V> m) {
+ for (Entry<? extends K, ? extends V> e: m.entrySet()) {
+ put(e.getKey(), e.getValue());
+ }
+ }
+
+ /**
+ * Removes the key (and its corresponding value) from this map. This method
+ * does nothing if the key is not in the map.
+ *
+ * @param key the key that needs to be removed
+ * @return the previous value associated with key, or null
+ * if there was no mapping for key
+ * @throws NullPointerException if the specified key is null
+ */
+ @Override
+ public V remove(Object key) {
+ int hash = hashOf(key);
+ return segmentFor(hash).remove(key, hash, null, false);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @throws NullPointerException if the specified key is null
+ */
+ public boolean remove(Object key, Object value) {
+ int hash = hashOf(key);
+ if (value == null) {
+ return false;
+ }
+ return segmentFor(hash).remove(key, hash, value, false) != null;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @throws NullPointerException if any of the arguments are null
+ */
+ public boolean replace(K key, V oldValue, V newValue) {
+ if (oldValue == null || newValue == null) {
+ throw new NullPointerException();
+ }
+ int hash = hashOf(key);
+ return segmentFor(hash).replace(key, hash, oldValue, newValue);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @return the previous value associated with the specified key, or
+ * null if there was no mapping for the key
+ * @throws NullPointerException if the specified key or value is null
+ */
+ public V replace(K key, V value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ int hash = hashOf(key);
+ return segmentFor(hash).replace(key, hash, value);
+ }
+
+ /**
+ * Removes all of the mappings from this map.
+ */
+ @Override
+ public void clear() {
+ for (int i = 0; i < segments.length; ++ i) {
+ segments[i].clear();
+ }
+ }
+
+ /**
+ * Returns a {@link java.util.Set} view of the keys contained in this map. The set is
+ * backed by the map, so changes to the map are reflected in the set, and
+ * vice-versa. The set supports element removal, which removes the
+ * corresponding mapping from this map, via the Iterator.remove,
+ * Set.remove, removeAll, retainAll, and
+ * clear operations. It does not support the add or
+ * addAll operations.
+ *
+ * The view's iterator is a "weakly consistent" iterator that
+ * will never throw {@link java.util.ConcurrentModificationException}, and guarantees
+ * to traverse elements as they existed upon construction of the iterator,
+ * and may (but is not guaranteed to) reflect any modifications subsequent
+ * to construction.
+ */
+ @Override
+ public Set<K> keySet() {
+ * The view's iterator is a "weakly consistent" iterator that
+ * will never throw {@link java.util.ConcurrentModificationException}, and guarantees
+ * to traverse elements as they existed upon construction of the iterator,
+ * and may (but is not guaranteed to) reflect any modifications subsequent
+ * to construction.
+ */
+ @Override
+ public Collection<V> values() {
+ * The view's iterator is a "weakly consistent" iterator that
+ * will never throw {@link java.util.ConcurrentModificationException}, and guarantees
+ * to traverse elements as they existed upon construction of the iterator,
+ * and may (but is not guaranteed to) reflect any modifications subsequent
+ * to construction.
+ */
+ @Override
+ public Set<Map.Entry<K, V>> entrySet() {