BurningBright

  • Home

  • Tags

  • Categories

  • Archives

  • Search

Tomcat Connector

Posted on 2017-09-13 | Edited on 2018-12-16 | In java

Tomcat, don't you have a conscience?

connector

  1. Create a Connector (construct a Connector object).
  2. Call the Connector's initInternal method.
  3. The ProtocolHandler's init method sets up the thread pool and a thread to handle requests.
  4. Call the Connector's startInternal method.
  5. Call the ProtocolHandler's start method.
  6. Call the MapperListener's start method to listen for changes.

Detail

Protocol

Tomcat has two protocol handlers, HTTP/1.1 and AJP/1.3,
configured in server.xml:

1
2
3
<Connector port="8080" address="0.0.0.0" protocol="HTTP/1.1"
connectionTimeout="20000" redirectPort="8443"/>
<Connector port="8010" protocol="AJP/1.3" redirectPort="8444" />

setProtocol(String protocol) sets Http11AprProtocol by default.

Init

1
2
3
4
5
6
7
8
9
10
11
12
13
14
@Override
protected void initInternal() throws LifecycleException {
super.initInternal();
// Initialize adapter
adapter = new CoyoteAdapter(this);
protocolHandler.setAdapter(adapter);
......
try {
protocolHandler.init();
} catch (Exception e) {
......
}
mapperListener.init();
}

Call LifecycleMBeanBase's initInternal() to register itself.
Create the Adapter and set it into the handler.
The protocolHandler's concrete implementation lives in the abstract class AbstractProtocol,
which is extended by AbstractAjpProtocol and AbstractHttp11Protocol.

Focus on AbstractProtocol:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
/*
* NOTE: There is no maintenance of state or checking for valid transitions
* within this class. It is expected that the connector will maintain state
* and prevent invalid state transitions.
*/

@Override
public void init() throws Exception {
//set names
......
try {
endpoint.init();
} catch (Exception ex) {
......
}
}

endpoint.init();

  1. Set the acceptor thread count and the maximum connection count

    1
    2
    3
    4
    // Initialize thread count defaults for acceptor
    if (acceptorThreadCount == 0) {
    acceptorThreadCount = 1;
    }
  2. Create thread pool, start listener thread for client request

    1
    2
    3
    4
    5
    // Initialize maxConnections
    if (getMaxConnections() == 0) {
    // User hasn't set a value - use the default
    setMaxConnections(getMaxThreadsInternal());
    }
  3. Start a server socket

    1
    2
    3
    4
    if (getAddress() == null) {
    serverSocket = serverSocketFactory.createSocket(getPort(),
    getBacklog());
    }

Start Internal

  1. Connector -> startInternal()
    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    /**
    * Begin processing requests via this Connector.
    */
    @Override
    protected void startInternal() throws LifecycleException {
    // Validate settings before starting
    ......
    setState(LifecycleState.STARTING);
    try {
    protocolHandler.start();
    } catch (Exception e) {
    ......
    }
    mapperListener.start();
    }

Call abstract protocol start

  1. AbstractProtocol -> start()
    1
    2
    3
    4
    5
    6
    7
    8
    9
    @Override
    public void start() throws Exception {
    ......
    try {
    endpoint.start();
    } catch (Exception ex) {
    ......
    }
    }

Call endpoint start

  1. AbstractEndpoint -> start()
    1
    2
    3
    4
    5
    6
    7
    public final void start() throws Exception {
    if (bindState == BindState.UNBOUND) {
    bind();
    bindState = BindState.BOUND_ON_START;
    }
    startInternal();
    }

Call its own abstract method startInternal().

  1. JIoEndpoint -> startInternal()
    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    18
    19
    20
    21
    22
    @Override
    public void startInternal() throws Exception {
    if (!running) {
    running = true;
    paused = false;

    // Create worker collection
    if (getExecutor() == null) {
    createExecutor();
    }

    initializeConnectionLatch();
    startAcceptorThreads();

    // Start async timeout thread
    Thread timeoutThread = new Thread(new AsyncTimeout(),
    getName() + "-AsyncTimeout");
    timeoutThread.setPriority(threadPriority);
    timeoutThread.setDaemon(true);
    timeoutThread.start();
    }
    }

Initialize the connection latch
Start the acceptor threads
Start an asynchronous timeout thread to handle async requests

  1. JIoEndpoint.AsyncTimeout -> run()
    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    18
    19
    20
    21
    22
    23
    24
    25
    26
    27
    28
    29
    30
    31
    32
    33
    34
    35
    36
    37
    38
    39
    40
    41
    42
    43
    44
    45
    46
    47
    48
    49
    50
    51
    52
    53
    54
    55
    56
    57
    58
    59
    60
    61
    62
    63
    64
    /**
    * Async timeout thread
    */
    protected class AsyncTimeout implements Runnable {
    /**
    * The background thread that checks async requests and fires the
    * timeout if there has been no activity.
    */
    @Override
    public void run() {

    // Loop until we receive a shutdown command
    while (running) {
    // delay / get socket object/ check timeout
    ......
    while (sockets.hasNext()) {
    SocketWrapper<Socket> socket = sockets.next();
    ......
    processSocketAsync(socket,SocketStatus.TIMEOUT);
    }

    // Loop if endpoint is paused
    ......
    }
    }
    }

    /**
    * Process an existing async connection. If processing is required, passes
    * the wrapped socket to an executor for processing.
    *
    * @param socket The socket associated with the client.
    * @param status Only OPEN and TIMEOUT are used. The others are used for
    * Comet requests that are not supported by the BIO (JIO)
    * Connector.
    */
    @Override
    public void processSocketAsync(SocketWrapper<Socket> socket,
    SocketStatus status) {
    try {
    synchronized (socket) {
    if (waitingRequests.remove(socket)) {
    SocketProcessor proc = new SocketProcessor(socket,status);
    ClassLoader loader = Thread.currentThread().getContextClassLoader();
    try {
    //threads should not be created by the webapp classloader
    ......
    // During shutdown, executor may be null - avoid NPE
    if (!running) return;
    getExecutor().execute(proc);
    //TODO gotta catch RejectedExecutionException and properly handle it
    } finally {
    // doPrivileged
    ......
    }
    }
    }
    } catch (Throwable t) {
    ExceptionUtils.handleThrowable(t);
    // This means we got an OOM or similar creating a thread, or that
    // the pool and its queue are full
    log.error(sm.getString("endpoint.process.fail"), t);
    }
    }

The job of org.apache.tomcat.util.net.JIoEndpoint.SocketProcessor is to dispatch
the concrete request to org.apache.tomcat.util.net.JIoEndpoint.Handler.
Then, based on the SocketState returned by the handler, it decides
whether to close the connection or continue with the next loop.

  1. JIoEndpoint.SocketProcessor -> run()
    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    18
    19
    20
    21
    22
    23
    24
    25
    26
    27
    28
    29
    30
    31
    32
    33
    34
    35
    36
    37
    38
    39
    40
    41
    42
    43
    44
    45
    46
    47
    48
    49
    50
    51
    52
    53
    54
    /**
    * This class is the equivalent of the Worker, but will simply use in an
    * external Executor thread pool.
    */
    ......
    @Override
    public void run() {
    boolean launch = false;
    synchronized (socket) {
    try {
    SocketState state = SocketState.OPEN;

    try {
    // SSL handshake
    serverSocketFactory.handshake(socket.getSocket());
    } catch (Throwable t) {
    ......
    }

    if ((state != SocketState.CLOSED)) {
    if (status == null) {
    state = handler.process(socket, SocketStatus.OPEN_READ);
    } else {
    state = handler.process(socket,status);
    }
    }
    if (state == SocketState.CLOSED) {
    // Close socket
    if (log.isTraceEnabled()) {
    log.trace("Closing socket:"+socket);
    }
    countDownConnection();
    try {
    socket.getSocket().close();
    } catch (IOException e) {
    // Ignore
    }
    } else if (state == SocketState.OPEN ||
    state == SocketState.UPGRADING ||
    state == SocketState.UPGRADING_TOMCAT ||
    state == SocketState.UPGRADED){
    socket.setKeptAlive(true);
    socket.access();
    launch = true;
    } else if (state == SocketState.LONG) {
    socket.access();
    waitingRequests.add(socket);
    }
    } finally {
    ......
    }
    socket = null;
    // Finish up this request
    }

state = handler.process(socket, SocketStatus.OPEN_READ);
call process

  1. AbstractProtocol.AbstractConnectionHandler -> process()
    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    18
    19
    20
    21
    22
    23
    24
    25
    26
    27
    28
    29
    30
    31
    32
    33
    34
    35
    36
    37
    38
    39
    40
    41
    42
    43
    44
    45
    46
    47
    48
    49
    50
    51
    52
    53
    54
    55
    56
    57
    58
    59
    60
    61
    62
    63
    64
    65
    66
    67
    68
    69
    70
    71
    72
    73
    74
    75
    76
    77
    78
    79
    80
    81
    82
    83
    84
    85
    86
    87
    88
    89
    90
    91
    92
    93
    94
    95
    96
    97
    98
    99
    100
    101
    102
    103
    104
    105
    106
    107
    108
    109
    110
    111
    112
    113
    114
    115
    116
    117
    118
    119
    120
    121
    122
    123
    124
    125
    126
    127
    128
    129
    130
    131
    132
    133
    134
    135
    136
    137
    138
    139
    140
    141
    142
    143
    144
    145
    146
    147
    148
    149
    150
    151
    152
    153
    154
    155
    156
    157
    158
    159
    160
    161
    162
    163
    164
    165
    166
    167
    168
    169
    170
    171
    172
    173
    174
    175
    176
    177
    178
    179
    public SocketState process(SocketWrapper<S> wrapper,
    SocketStatus status) {
    if (wrapper == null) {
    // Nothing to do. Socket has been closed.
    return SocketState.CLOSED;
    }

    S socket = wrapper.getSocket();
    if (socket == null) {
    // Nothing to do. Socket has been closed.
    return SocketState.CLOSED;
    }

    Processor<S> processor = connections.get(socket);
    if (status == SocketStatus.DISCONNECT && processor == null) {
    // Nothing to do. Endpoint requested a close and there is no
    // longer a processor associated with this socket.
    return SocketState.CLOSED;
    }

    wrapper.setAsync(false);
    ContainerThreadMarker.markAsContainerThread();

    try {
    if (processor == null) {
    processor = recycledProcessors.poll();
    }
    if (processor == null) {
    processor = createProcessor();
    }

    initSsl(wrapper, processor);

    SocketState state = SocketState.CLOSED;
    do {
    if (status == SocketStatus.DISCONNECT &&
    !processor.isComet()) {
    // Do nothing here, just wait for it to get recycled
    // Don't do this for Comet we need to generate an end
    // event (see BZ 54022)
    } else if (processor.isAsync() || state == SocketState.ASYNC_END) {
    state = processor.asyncDispatch(status);
    if (state == SocketState.OPEN) {
    // release() won't get called so in case this request
    // takes a long time to process, remove the socket from
    // the waiting requests now else the async timeout will
    // fire
    getProtocol().endpoint.removeWaitingRequest(wrapper);
    // There may be pipe-lined data to read. If the data
    // isn't processed now, execution will exit this
    // loop and call release() which will recycle the
    // processor (and input buffer) deleting any
    // pipe-lined data. To avoid this, process it now.
    state = processor.process(wrapper);
    }
    } else if (processor.isComet()) {
    state = processor.event(status);
    } else if (processor.getUpgradeInbound() != null) {
    state = processor.upgradeDispatch();
    } else if (processor.isUpgrade()) {
    state = processor.upgradeDispatch(status);
    } else {
    state = processor.process(wrapper);
    }

    if (state != SocketState.CLOSED && processor.isAsync()) {
    state = processor.asyncPostProcess();
    }

    if (state == SocketState.UPGRADING) {
    // Get the HTTP upgrade handler
    HttpUpgradeHandler httpUpgradeHandler =
    processor.getHttpUpgradeHandler();
    // Release the Http11 processor to be re-used
    release(wrapper, processor, false, false);
    // Create the upgrade processor
    processor = createUpgradeProcessor(
    wrapper, httpUpgradeHandler);
    // Mark the connection as upgraded
    wrapper.setUpgraded(true);
    // Associate with the processor with the connection
    connections.put(socket, processor);
    // Initialise the upgrade handler (which may trigger
    // some IO using the new protocol which is why the lines
    // above are necessary)
    // This cast should be safe. If it fails the error
    // handling for the surrounding try/catch will deal with
    // it.
    httpUpgradeHandler.init((WebConnection) processor);
    } else if (state == SocketState.UPGRADING_TOMCAT) {
    // Get the UpgradeInbound handler
    org.apache.coyote.http11.upgrade.UpgradeInbound inbound =
    processor.getUpgradeInbound();
    // Release the Http11 processor to be re-used
    release(wrapper, processor, false, false);
    // Create the light-weight upgrade processor
    processor = createUpgradeProcessor(wrapper, inbound);
    inbound.onUpgradeComplete();
    }
    if (getLog().isDebugEnabled()) {
    getLog().debug("Socket: [" + wrapper +
    "], Status in: [" + status +
    "], State out: [" + state + "]");
    }
    } while (state == SocketState.ASYNC_END ||
    state == SocketState.UPGRADING ||
    state == SocketState.UPGRADING_TOMCAT);

    if (state == SocketState.LONG) {
    // In the middle of processing a request/response. Keep the
    // socket associated with the processor. Exact requirements
    // depend on type of long poll
    connections.put(socket, processor);
    longPoll(wrapper, processor);
    } else if (state == SocketState.OPEN) {
    // In keep-alive but between requests. OK to recycle
    // processor. Continue to poll for the next request.
    connections.remove(socket);
    release(wrapper, processor, false, true);
    } else if (state == SocketState.SENDFILE) {
    // Sendfile in progress. If it fails, the socket will be
    // closed. If it works, the socket either be added to the
    // poller (or equivalent) to await more data or processed
    // if there are any pipe-lined requests remaining.
    connections.put(socket, processor);
    } else if (state == SocketState.UPGRADED) {
    // Need to keep the connection associated with the processor
    connections.put(socket, processor);
    // Don't add sockets back to the poller if this was a
    // non-blocking write otherwise the poller may trigger
    // multiple read events which may lead to thread starvation
    // in the connector. The write() method will add this socket
    // to the poller if necessary.
    if (status != SocketStatus.OPEN_WRITE) {
    longPoll(wrapper, processor);
    }
    } else {
    // Connection closed. OK to recycle the processor. Upgrade
    // processors are not recycled.
    connections.remove(socket);
    if (processor.isUpgrade()) {
    processor.getHttpUpgradeHandler().destroy();
    } else if (processor instanceof org.apache.coyote.http11.upgrade.UpgradeProcessor) {
    // NO-OP
    } else {
    release(wrapper, processor, true, false);
    }
    }
    return state;
    } catch(java.net.SocketException e) {
    // SocketExceptions are normal
    getLog().debug(sm.getString(
    "abstractConnectionHandler.socketexception.debug"), e);
    } catch (java.io.IOException e) {
    // IOExceptions are normal
    getLog().debug(sm.getString(
    "abstractConnectionHandler.ioexception.debug"), e);
    }
    // Future developers: if you discover any other
    // rare-but-nonfatal exceptions, catch them here, and log as
    // above.
    catch (Throwable e) {
    ExceptionUtils.handleThrowable(e);
    // any other exception or error is odd. Here we log it
    // with "ERROR" level, so it will show up even on
    // less-than-verbose logs.
    getLog().error(
    sm.getString("abstractConnectionHandler.error"), e);
    }
    // Make sure socket/processor is removed from the list of current
    // connections
    connections.remove(socket);
    // Don't try to add upgrade processors back into the pool
    if (!(processor instanceof org.apache.coyote.http11.upgrade.UpgradeProcessor)
    && !processor.isUpgrade()) {
    release(wrapper, processor, true, false);
    }
    return SocketState.CLOSED;
    }

Analyze the request's header / request line / body

  1. AbstractHttp11Processor -> process()
    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    18
    19
    20
    21
    22
    23
    24
    25
    26
    27
    28
    29
    30
    31
    32
    33
    34
    35
    36
    37
    38
    39
    40
    41
    42
    43
    44
    45
    46
    47
    48
    49
    50
    51
    52
    53
    54
    55
    56
    57
    58
    59
    60
    61
    62
    63
    64
    65
    66
    67
    68
    69
    70
    71
    72
    73
    74
    75
    76
    77
    78
    79
    80
    81
    82
    83
    84
    85
    86
    87
    88
    89
    90
    91
    92
    93
    94
    95
    96
    97
    98
    99
    100
    101
    102
    103
    104
    105
    106
    107
    108
    109
    110
    111
    112
    113
    114
    115
    116
    117
    118
    119
    120
    121
    122
    123
    124
    125
    126
    127
    128
    129
    130
    131
    132
    133
    134
    135
    136
    137
    138
    139
    140
    141
    142
    143
    144
    145
    146
    147
    148
    149
    150
    151
    152
    153
    154
    155
    156
    157
    158
    159
    160
    161
    162
    163
    164
    165
    166
    167
    168
    169
    170
    171
    172
    173
    174
    175
    176
    177
    178
    179
    180
    181
    182
    183
    184
    185
    186
    187
    188
    189
    190
    191
    192
    193
    194
    195
    196
    197
    198
    199
    200
    201
    202
    203
    204
    205
    206
    207
    208
    209
    210
    211
    212
    213
    214
    215
    216
    217
    218
    219
    220
    221
    222
    223
    224
    225
    226
    227
    228
    229
    230
    231
    232
    233
    234
    235
    236
    237
    238
    239
    240
    241
    242
    243
    /**
    * Process pipelined HTTP requests using the specified input and output
    * streams.
    *
    * @param socketWrapper Socket from which the HTTP requests will be read
    * and the HTTP responses will be written.
    *
    * @throws IOException error during an I/O operation
    */
    @Override
    public SocketState process(SocketWrapper<S> socketWrapper)
    throws IOException {
    RequestInfo rp = request.getRequestProcessor();
    rp.setStage(org.apache.coyote.Constants.STAGE_PARSE);

    // Setting up the I/O
    setSocketWrapper(socketWrapper);
    getInputBuffer().init(socketWrapper, endpoint);
    getOutputBuffer().init(socketWrapper, endpoint);

    // Flags
    keepAlive = true;
    comet = false;
    openSocket = false;
    sendfileInProgress = false;
    readComplete = true;
    if (endpoint.getUsePolling()) {
    keptAlive = false;
    } else {
    keptAlive = socketWrapper.isKeptAlive();
    }

    if (disableKeepAlive()) {
    socketWrapper.setKeepAliveLeft(0);
    }

    while (!getErrorState().isError() && keepAlive && !comet && !isAsync() &&
    upgradeInbound == null &&
    httpUpgradeHandler == null && !endpoint.isPaused()) {

    // Parsing the request header
    try {
    setRequestLineReadTimeout();

    if (!getInputBuffer().parseRequestLine(keptAlive)) {
    if (handleIncompleteRequestLineRead()) {
    break;
    }
    }

    if (endpoint.isPaused()) {
    // 503 - Service unavailable
    response.setStatus(503);
    setErrorState(ErrorState.CLOSE_CLEAN, null);
    } else {
    keptAlive = true;
    // Set this every time in case limit has been changed via JMX
    request.getMimeHeaders().setLimit(endpoint.getMaxHeaderCount());
    request.getCookies().setLimit(getMaxCookieCount());
    // Currently only NIO will ever return false here
    if (!getInputBuffer().parseHeaders()) {
    // We've read part of the request, don't recycle it
    // instead associate it with the socket
    openSocket = true;
    readComplete = false;
    break;
    }
    if (!disableUploadTimeout) {
    setSocketTimeout(connectionUploadTimeout);
    }
    }
    } catch (IOException e) {
    if (getLog().isDebugEnabled()) {
    getLog().debug(
    sm.getString("http11processor.header.parse"), e);
    }
    setErrorState(ErrorState.CLOSE_NOW, e);
    break;
    } catch (Throwable t) {
    ExceptionUtils.handleThrowable(t);
    UserDataHelper.Mode logMode = userDataHelper.getNextMode();
    if (logMode != null) {
    String message = sm.getString(
    "http11processor.header.parse");
    switch (logMode) {
    case INFO_THEN_DEBUG:
    message += sm.getString(
    "http11processor.fallToDebug");
    //$FALL-THROUGH$
    case INFO:
    getLog().info(message, t);
    break;
    case DEBUG:
    getLog().debug(message, t);
    }
    }
    // 400 - Bad Request
    response.setStatus(400);
    setErrorState(ErrorState.CLOSE_CLEAN, t);
    getAdapter().log(request, response, 0);
    }

    if (!getErrorState().isError()) {
    // Setting up filters, and parse some request headers
    rp.setStage(org.apache.coyote.Constants.STAGE_PREPARE);
    try {
    prepareRequest();
    } catch (Throwable t) {
    ExceptionUtils.handleThrowable(t);
    if (getLog().isDebugEnabled()) {
    getLog().debug(sm.getString(
    "http11processor.request.prepare"), t);
    }
    // 500 - Internal Server Error
    response.setStatus(500);
    setErrorState(ErrorState.CLOSE_CLEAN, t);
    getAdapter().log(request, response, 0);
    }
    }

    if (maxKeepAliveRequests == 1) {
    keepAlive = false;
    } else if (maxKeepAliveRequests > 0 &&
    socketWrapper.decrementKeepAlive() <= 0) {
    keepAlive = false;
    }

    // Process the request in the adapter
    if (!getErrorState().isError()) {
    try {
    rp.setStage(org.apache.coyote.Constants.STAGE_SERVICE);
    adapter.service(request, response);
    // Handle when the response was committed before a serious
    // error occurred. Throwing a ServletException should both
    // set the status to 500 and set the errorException.
    // If we fail here, then the response is likely already
    // committed, so we can't try and set headers.
    if(keepAlive && !getErrorState().isError() && (
    response.getErrorException() != null ||
    (!isAsync() &&
    statusDropsConnection(response.getStatus())))) {
    setErrorState(ErrorState.CLOSE_CLEAN, null);
    }
    setCometTimeouts(socketWrapper);
    } catch (InterruptedIOException e) {
    setErrorState(ErrorState.CLOSE_NOW, e);
    } catch (HeadersTooLargeException e) {
    getLog().error(sm.getString("http11processor.request.process"), e);
    // The response should not have been committed but check it
    // anyway to be safe
    if (response.isCommitted()) {
    setErrorState(ErrorState.CLOSE_NOW, e);
    } else {
    response.reset();
    response.setStatus(500);
    setErrorState(ErrorState.CLOSE_CLEAN, e);
    response.setHeader("Connection", "close"); // TODO: Remove
    }
    } catch (Throwable t) {
    ExceptionUtils.handleThrowable(t);
    getLog().error(sm.getString("http11processor.request.process"), t);
    // 500 - Internal Server Error
    response.setStatus(500);
    setErrorState(ErrorState.CLOSE_CLEAN, t);
    getAdapter().log(request, response, 0);
    }
    }

    // Finish the handling of the request
    rp.setStage(org.apache.coyote.Constants.STAGE_ENDINPUT);

    if (!isAsync() && !comet) {
    if (getErrorState().isError()) {
    // If we know we are closing the connection, don't drain
    // input. This way uploading a 100GB file doesn't tie up the
    // thread if the servlet has rejected it.
    getInputBuffer().setSwallowInput(false);
    } else {
    // Need to check this again here in case the response was
    // committed before the error that requires the connection
    // to be closed occurred.
    checkExpectationAndResponseStatus();
    }
    endRequest();
    }

    rp.setStage(org.apache.coyote.Constants.STAGE_ENDOUTPUT);

    // If there was an error, make sure the request is counted as
    // and error, and update the statistics counter
    if (getErrorState().isError()) {
    response.setStatus(500);
    }
    request.updateCounters();

    if (!isAsync() && !comet || getErrorState().isError()) {
    if (getErrorState().isIoAllowed()) {
    getInputBuffer().nextRequest();
    getOutputBuffer().nextRequest();
    }
    }

    if (!disableUploadTimeout) {
    if(endpoint.getSoTimeout() > 0) {
    setSocketTimeout(endpoint.getSoTimeout());
    } else {
    setSocketTimeout(0);
    }
    }

    rp.setStage(org.apache.coyote.Constants.STAGE_KEEPALIVE);

    if (breakKeepAliveLoop(socketWrapper)) {
    break;
    }
    }

    rp.setStage(org.apache.coyote.Constants.STAGE_ENDED);

    if (getErrorState().isError() || endpoint.isPaused()) {
    return SocketState.CLOSED;
    } else if (isAsync() || comet) {
    return SocketState.LONG;
    } else if (isUpgrade()) {
    return SocketState.UPGRADING;
    } else if (getUpgradeInbound() != null) {
    return SocketState.UPGRADING_TOMCAT;
    } else {
    if (sendfileInProgress) {
    return SocketState.SENDFILE;
    } else {
    if (openSocket) {
    if (readComplete) {
    return SocketState.OPEN;
    } else {
    return SocketState.LONG;
    }
    } else {
    return SocketState.CLOSED;
    }
    }
    }
    }

Read the HTTP request from the socket, analyze the request header,
construct the Request and Response objects, then call Adapter.service().
[raw 1115 adapter.service(request, response);]

adapter.service() finishes parsing the request line and body, and packs the
information into the Request and Response objects.

Adapter org.apache.catalina.connector.CoyoteAdapter is the bridge between
Connector and Container.

When this step finished request have passed to Container from Connector

PS. The request is first handled with org.apache.coyote.Request because it is a
lightweight object in Tomcat.
Its memory can easily be recycled and reused by the JVM.
Not giving this job to Connector.Request is a design choice to reduce the burden (for performance).

The Connector.Request functionality for servlets is far more complex than org.apache.coyote.Request.

  1. CoyoteAdapter -> service()
    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    18
    19
    20
    21
    22
    23
    24
    25
    26
    27
    28
    29
    30
    31
    32
    33
    34
    35
    36
    37
    38
    39
    40
    41
    42
    43
    44
    /**
    * Service method.
    */
    @Override
    public void service(org.apache.coyote.Request req,
    org.apache.coyote.Response res)
    throws Exception {

    Request request = (Request) req.getNote(ADAPTER_NOTES);
    Response response = (Response) res.getNote(ADAPTER_NOTES);

    if (request == null) {
    ......
    }

    if (connector.getXpoweredBy()) {
    response.addHeader("X-Powered-By", POWERED_BY);
    }

    boolean comet = false;
    boolean async = false;
    boolean postParseSuccess = false;

    try {
    // Parse and set Catalina and configuration specific
    // request parameters
    req.getRequestProcessor().setWorkerThreadName(Thread.currentThread().getName());
    postParseSuccess = postParseRequest(req, request, res, response);
    if (postParseSuccess) {
    //check valves if we support async
    request.setAsyncSupported(connector.getService().getContainer().getPipeline().isAsyncSupported());
    // Calling the container
    connector.getService().getContainer().getPipeline().getFirst().invoke(request, response);
    ......
    }

    }
    ......
    } catch (IOException e) {
    // Ignore
    } finally {
    ......
    }
    }

connector.getService().getContainer().getPipeline().getFirst().invoke(request, response);
pass request to container

Mapper Listener

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
@Override
public final synchronized void start() throws LifecycleException {

if (LifecycleState.STARTING_PREP.equals(state) || LifecycleState.STARTING.equals(state) ||
LifecycleState.STARTED.equals(state)) {
......
}

if (state.equals(LifecycleState.NEW)) {
init();
} else if (state.equals(LifecycleState.FAILED)) {
stop();
} else if (!state.equals(LifecycleState.INITIALIZED) &&
!state.equals(LifecycleState.STOPPED)) {
invalidTransition(Lifecycle.BEFORE_START_EVENT);
}

try {
setStateInternal(LifecycleState.STARTING_PREP, null, false);
startInternal();
if (state.equals(LifecycleState.FAILED)) {
// This is a 'controlled' failure. The component put itself into the
// FAILED state so call stop() to complete the clean-up.
stop();
} else if (!state.equals(LifecycleState.STARTING)) {
// Shouldn't be necessary but acts as a check that sub-classes are
// doing what they are supposed to.
invalidTransition(Lifecycle.AFTER_START_EVENT);
} else {
setStateInternal(LifecycleState.STARTED, null, false);
}
} catch (Throwable t) {
......
}
}
  1. Register the Connector itself
  2. Add a Listener for each module
  3. Add the Containers' mapping relations

Connector start finished, damn!!!

tomcat system architecture

Posted on 2017-09-08 | Edited on 2018-12-16 | In java

Architecture

tomcat

  • Server is used to manage services: to the outside it provides a service
    interface, and internally it maintains the collection of services.
    • manage the services' lifecycle
    • find the service for a request
    • stop services, and so on
  • A Service consists of connectors and a container
    • The Connector is responsible for handing the request to the container
    • The Container is responsible for processing the request

Server

Focus on two methods: addService(Service service) and findService(String name)

org.apache.catalina.core.StandardServer

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
/**
* Add a new Service to the set of defined Services.
*
* @param service The Service to be added
*/
@Override
public void addService(Service service) {

service.setServer(this);

synchronized (servicesLock) {
Service results[] = new Service[services.length + 1];
System.arraycopy(services, 0, results, 0, services.length);
results[services.length] = service;
services = results;

if (getState().isAvailable()) {
try {
service.start();
} catch (LifecycleException e) {
// Ignore
}
}

// Report this property change to interested listeners
support.firePropertyChange("service", null, service);
}

}

  • set the current service's server
  • extend the service array, placing the new service at the last index
  • check the service's life state
  • report the event to interested listeners

Interface Lifecycle

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
start()
-----------------------------
| |
| init() |
NEW ->-- INITIALIZING |
| | | | ------------------<-----------------------
| | |auto | | |
| | \|/ start() \|/ \|/ auto auto stop() |
| | INITIALIZED -->-- STARTING_PREP -->- STARTING -->- STARTED -->--- |
| | | | |
| |destroy()| | |
| -->-----<-- ------------------------<-------------------------------- ^
| | | |
| | \|/ auto auto start() |
| | STOPPING_PREP ---->---- STOPPING ------>----- STOPPED ----->-----
| \|/ ^ | ^
| | stop() | | |
| | -------------------------- | |
| | | | |
| | | destroy() destroy() | |
| | FAILED ---->------ DESTROYING ---<----------------- |
| | ^ | |
| | destroy() | |auto |
| -------->----------------- \|/ |
| DESTROYED |
| |
| stop() |
--->------------------------------>------------------------------
  • Any state can transition to FAILED.

  • Calling start() while a component is in states STARTING_PREP, STARTING or
    STARTED has no effect.

  • Calling start() while a component is in state NEW will cause init() to be
    called immediately after the start() method is entered.

  • Calling stop() while a component is in states STOPPING_PREP, STOPPING or
    STOPPED has no effect.

  • Calling stop() while a component is in state NEW transitions the component
    to STOPPED. This is typically encountered when a component fails to start and
    does not start all its sub-components. When the component is stopped, it will
    try to stop all sub-components - even those it didn’t start.

  • Attempting any other transition will throw LifecycleException.

Analysis

1
2
3
4
5
6
7
8
I Lifecycle
|
--- A LifecycleBase
| |
| --- A LifecycleMBeanBase
| |
| --- ...
--- ...

All components in Tomcat implement the Lifecycle interface;
a parent-level component manages its sub-level components.
So when the top component changes state, the change propagates to all components.
Focus on analyzing the start process.

org.apache.catalina.util.LifecycleBase

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
@Override
public final synchronized void start() throws LifecycleException {

......

try {
setStateInternal(LifecycleState.STARTING_PREP, null, false);
startInternal();

......

} catch (Throwable t) {
// This is an 'uncontrolled' failure so put the component into the
// FAILED state and throw an exception.
ExceptionUtils.handleThrowable(t);
setStateInternal(LifecycleState.FAILED, null, false);
throw new LifecycleException(sm.getString("lifecycleBase.startFail", toString()), t);
}
}

org.apache.catalina.core.StandardServer

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
/**
* Start nested components ({@link Service}s) and implement the requirements
* of {@link org.apache.catalina.util.LifecycleBase#startInternal()}.
*
* @exception LifecycleException if this component detects a fatal error
* that prevents this component from being used
*/
@Override
protected void startInternal() throws LifecycleException {

fireLifecycleEvent(CONFIGURE_START_EVENT, null);
setState(LifecycleState.STARTING);

globalNamingResources.start();

// Start our defined Services
synchronized (servicesLock) {
for (int i = 0; i < services.length; i++) {
services[i].start();
}
}
}

All sub-components will be notified through fireLifecycleEvent.
All registered services will be started.

org.apache.catalina.util.LifecycleSupport

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
/**
* Notify all lifecycle event listeners that a particular event has
* occurred for this Container. The default implementation performs
* this notification synchronously using the calling thread.
*
* @param type Event type
* @param data Event data
*/
public void fireLifecycleEvent(String type, Object data) {

LifecycleEvent event = new LifecycleEvent(lifecycle, type, data);
LifecycleListener interested[] = listeners;
for (int i = 0; i < interested.length; i++)
interested[i].lifecycleEvent(event);

}

It is called (and defined) in LifecycleBase; the method name is the same.

Service

A Service packages multiple connectors and one container.
We focus on addConnector(Connector connector) and setContainer(Container container)

org.apache.catalina.core.StandardService

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
/**
* Set the <code>Container</code> that handles requests for all
* <code>Connectors</code> associated with this Service.
*
* @param container The new Container
*/
@Override
public void setContainer(Container container) {

Container oldContainer = this.container;
if ((oldContainer != null) && (oldContainer instanceof Engine))
((Engine) oldContainer).setService(null);
this.container = container;
if ((this.container != null) && (this.container instanceof Engine))
((Engine) this.container).setService(this);
if (getState().isAvailable() && (this.container != null)) {
try {
this.container.start();
} catch (LifecycleException e) {
// Ignore
}
}
if (getState().isAvailable() && (oldContainer != null)) {
try {
oldContainer.stop();
} catch (LifecycleException e) {
// Ignore
}
}

// Report this property change to interested listeners
support.firePropertyChange("container", oldContainer, this.container);

}

  1. set the new input container
  2. reciprocally set the container's service
  3. if the state is available, start the container
  4. if an old container exists, stop it
  5. report the event
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
/**
* Add a new Connector to the set of defined Connectors, and associate it
* with this Service's Container.
*
* @param connector The Connector to be added
*/
@Override
public void addConnector(Connector connector) {

synchronized (connectorsLock) {
connector.setService(this);
Connector results[] = new Connector[connectors.length + 1];
System.arraycopy(connectors, 0, results, 0, connectors.length);
results[connectors.length] = connector;
connectors = results;

if (getState().isAvailable()) {
try {
connector.start();
} catch (LifecycleException e) {
log.error(sm.getString(
"standardService.connector.startFailed",
connector), e);
}
}

// Report this property change to interested listeners
support.firePropertyChange("connector", null, connector);
}

}
  1. Set the connector's service.
  2. Similar to how the server manages services, an array stores the connectors.
  3. Extend the connector array and place the new connector at the last index, implementing a dynamic array.
  4. Start the connector and report the event.

Design Model

observer
All registered observer listeners will be called when an event is reported.
An observer may do something, or just ignore it.

997.2 vs 997.1 Turbo

Posted on 2017-09-07 | Edited on 2018-12-16 | In blog

(6speedonline)[https://www.6speedonline.com/forums/997-turbo-gt2/257577-997-2-vs-997-1-turbo.html]
997.2 vs 997.1 Turbo
In a recent discussion, someone asked me about 997.2 versus 997.1 Turbo.
Up until now, I have thought that any new model Porsche will be “better” than
previous model and will at least make it look obsolete.
(And if you’ve read my criticism of stock 997.1 Turbo’s suspension and exhaust,
you know I don’t pull punches just because I own this car.) It seemed the answer
should be obvious, newer is better. However the 997.1 Turbo has a huge trump card,
and that is the Mezger engine. Read up on google but essentially the Mezger engine
of 997.1 (named after the engineer who designed it) is easily Porsche’s most famous
engine in its storied history, with tremendous pedigree and heritage.
It is race derived, responsible for many victories, and its worth is well proven over
many years by Porsche’s racing teams. Between new turbo engine of 997.2
versus old Mezger turbo engine of 997.1, I used to think I don’t care which
engine as long as power is delivered, but the more I think about it, the
more I am leaning towards changing my opinion.
In another thread upgrading 997.2 turbo 6speed manual Tom of Champion was talking
about the internals of 997.2 Turbo engine and mentioned restriction of fuel system
and rods testing 25% weaker. What crossed my mind reading his post is…what ELSE
is weaker in the 997.2 engine?

The only exception I would add is this: with respect to suspension,
if you are NOT going to mod the suspension, then the 997.2 Turbo is the better choice.
997.2’s PASM system is the second generation and is much better than 997.1, which was
Porsche’s first attempt at PASM and somewhat of a design flaw
(why a number of people here use Bilstein coilover or lowering springs).
997.1’s PASM is either way too soft or way too stiff between the 2 suspension settings,
997.2’s PASM is a lot more usable and smoother on the stiffer setting.
There were a number of other revisions (spring rate, sway bar, PTM system)
that make 997.2 better - the advantage of 4 years of experience.
In order of suspension ranking I would rank thus, not for everyone,
only for those looking for a more aggressive handling car:
Bilstein 997.2 > Bilstein 997.1 > stock 997.2 >>> stock 997.1.

So, perhaps 997.1 Turbo, mainly because of the Mezger engine,
will go down in history as the best Turbo ever?

Perhaps owners of these cars should hold on and not sell?
Perhaps potential owners should look for low mileage samples of 997.1,
and not 997.2? For sure, for the purpose of modding,
there will NEVER again be an engine that is as mod-friendly and as
robust as the Mezger engine.(It is simply too expensive to be made nowadays.)
Unlike the Mezger GT3 engine, which has problem with RMS leak,
the Mezger Turbo is THE perfect engine.

Bottom line: IMHO anyone here looking to buy a used Turbo, if you see a low mile 997.1 Turbo,
with PCCB/Chrono/Adaptive Sport Seat — jump on it!
All 3 years of Mezger Turbo’s,
2007 to 2009,are remarkably trouble free. Just look at this board (versus a BMW board for example)
then it should become obvious to you. The most common problem is leaking from the powered
steering and brake slave cylinder, but this could be easily and cheaply fixed and obviously
minor compared to other potential problems with drivetrain, engine, electronics, etc.

In looking at used cars, IMHO it is more important to get the DME readout and
check for overrev’s,and look for a CPO, low mileage, non-modded car, preferably with PCCB.
2009 is most desirable because it has the new PCM 2 with bluetooth and ipod functions and touchscreen.
You can add after-market bluetooth and ipod functions to 2007 and 2008 Turbo’s
(I use MoBridge in my 2008, work well in general and do all the functions but I don’t recommend
it because it has a beeping noise, a design flaw; Dension is probably the best choice at this point.)
but as in all after-market products,
they would have glitches here and there and will never be as good as OEM.
My opinions-YMMV:

Intellij IDEA Debug

Posted on 2017-09-07 | Edited on 2020-06-17 | In tool

Customize shortcut keymap

change keymap
shortcut key

change intellij’s keymap to eclipse’s

  • F5 step into a method
  • Ctrl + F5 smart step in
  • F6 step over a row
  • F7 step out a method
  • Ctrl + R step to cursor’s raw

Smart step in

smart step in
Ctrl + F5
when a series method link a callback, smart step in debug is useful.

Thread Block

break point
By default, when the current thread hits a breakpoint, other threads will be blocked
until the current debug thread finishes.
If the project needs multi-threaded debugging, set the breakpoint's suspend policy to Thread.

Frames

frame opt
In Java's memory model a thread runs multiple methods, and each method corresponds to a frame.
Every frame has a private local-variable array whose index 0 is this. [Stacks link invocations]
When you drop the frame, the debugger rolls back.

Thread List

thread list
Show thread list, thread status.

Thread’s Frames

switch thread
Used to switch threads, but it seems you cannot debug a single thread's code alone.

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
public class DebugTest {

private static Object lock = new Object();

@Test
public void main() {
try {
for(int i=0; i< 3; i++) {
new Line().start();
Thread.sleep(1000);
}
new LineAnother().start();
Thread.sleep(100000);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
class Line extends Thread {
public void run() {
synchronized (lock) {
// smart step in
if (Thread.currentThread().getContextClassLoader().equals(new Object()))
System.out.println("-_-!");

for (int i = 0; i < 3; i++)
System.out.println(Thread.currentThread().getId() +
"\t" + Thread.currentThread().getName() +
"\t" + i);
test(5);
}
}
// sub frame
public int test(int i) {
i -= 1;
return i;
}
}
1
2
3
4
5
6
class LineAnother extends Thread {
public void run() {
System.out.println("xxx");
System.out.println("ooo");
}
}

linux process job session control

Posted on 2017-08-31 | Edited on 2018-12-16 | In linux

Process group

Every process has a process id and a process group id
Every process group has a leader process [whose id is the same as the group's]

Each process in a group is related to the others, like a family tree.

All processes' root is the init process, whose id is 1
A process group exists as long as some process in the group exists.

1
2
3
4
5
6
pi@raspberrypi:~ $ ps axj |grep "\s1\s"
0 1 1 1 ? -1 Ss 0 2:25 /sbin/init
1 145 145 145 ? -1 Ss 0 0:27 /lib/systemd/systemd-journald
1 150 150 150 ? -1 Ss 0 0:00 /lib/systemd/systemd-udevd
......
1 853 853 853 ? -1 Ss 0 0:00 /usr/sbin/sshd -D

Jobs

The shell controls foreground and background by job or process group, not by individual process.

A job can be composed of one or more process groups.

A shell can run one foreground job and multiple background jobs.

Use the pipe command | to run multiple processes, and & to put a job in the background.

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
pi@raspberrypi:~ $ sleep 1000|more|less &
[1] 12033
pi@raspberrypi:~ $ ps axj | head -n1
PPID PID PGID SID TTY TPGID STAT UID TIME COMMAND
pi@raspberrypi:~ $ ps axj |grep 12033
12031 12033 12033 12033 pts/0 12068 Ss 1000 0:00 -bash
12033 12061 12061 12033 pts/0 12068 T 1000 0:00 sleep 1000
12033 12062 12061 12033 pts/0 12068 T 1000 0:00 more
12033 12063 12061 12033 pts/0 12068 T 1000 0:00 less
12033 12068 12068 12033 pts/0 12068 R+ 1000 0:00 ps axj
12033 12069 12068 12033 pts/0 12068 S+ 1000 0:00 grep --color=auto 12033

pi@raspberrypi:~ $ ps axj |grep 12031
12024 12031 12024 12024 ? -1 S 1000 0:00 sshd: pi@pts/0
12031 12033 12033 12033 pts/0 12070 Ss 1000 0:00 -bash
12033 12071 12070 12033 pts/0 12070 S+ 1000 0:00 grep --color=auto 12031

pi@raspberrypi:~ $ ps axj |grep 12024
853 12024 12024 12024 ? -1 Ss 0 0:00 sshd: pi [priv]
12024 12031 12024 12024 ? -1 S 1000 0:00 sshd: pi@pts/0
12033 12074 12073 12033 pts/0 12073 S+ 1000 0:00 grep --color=auto 12024

pi@raspberrypi:~ $ ps axj |grep 853
1 853 853 853 ? -1 Ss 0 0:00 /usr/sbin/sshd -D
853 12024 12024 12024 ? -1 Ss 0 0:00 sshd: pi [priv]
12033 12076 12075 12033 pts/0 12075 S+ 1000 0:00 grep --color=auto 853

Difference between a job and a process group:
sub-processes do not necessarily belong to the main process's job.

Session

A session is composed of one or more process groups. [jobs?]
A session can have a controlling terminal.

The first process that builds a connection with the terminal's session
is called the controlling process — normally bash.

  • one controlling process (bash)
  • one foreground process
  • any number of background processes

Job control

Job control is a feature added by BSD in 1980.
It allows a terminal to execute multiple process groups.

  • requires a shell with job control
  • the kernel's terminal driver must support job control
  • the kernel must support job control signals

Signal

name opt signal
Interrupt Ctrl + c SIGINT
Quit Ctrl + \ SIGQUIT
Suspend Ctrl + z SIGTSTP

Input Output

Only a foreground process can read the terminal's input data.

When a background process wants to read data from the terminal
it is not an error behaviour; it is just not allowed.

When the kernel driver detects that a background process wants to read,
it sends a SIGTTIN signal to the background job to stop the process.

Then use the fg command to bring the process to the foreground and run it
[move it to the foreground and send a SIGCONT signal to its group]

Whether background processes may write data to the terminal is configurable:
the stty tostop command forbids background jobs from writing to the terminal.

1
2
3
4
5
6
7
8
9
10
11
12
#include<stdio.h>
#include<stdlib.h>
int main()
{
char args[10];
while(1)
{
scanf("%s",&args);
printf("%s\n",args);
}
return 0;
}

linux execute in background

Posted on 2017-08-30 | Edited on 2018-12-16 | In linux

link

Problem

Always meet problem like this:
Login in by telnet/ssh, run some time costs task
But be interrupted, cause by the net problem.

How to deal this kind problem, let task run background

Command nohup/setsid/&

  1. nohup

In early versions of UNIX, terminals communicated with the system by
modem. When you logged out or disconnected, the modem sent a hang-up
signal to the system to close the current session process and all its sub-processes.

When the terminal logs out or the connection breaks, the process receives a 'HUP' (hangup) signal.
So we have two solutions

  • let the process ignore the 'HUP' signal
  • change the process's owner
1
2
nohup ./sunny.sh clientid xxxxx &
nohup ./sunny.sh clientid > sunny.log 2>&1 &
  1. setsid

Change the process's parent to process id '1' (init)

1
setsid ./sunny.sh clientid xxxxx
  1. &

When a command is put in ( ), it runs in a sub-shell.
Put & inside the ( ) as well, and the submitted job will not be added to the job list.
So the command can ignore the HUP signal

1
(./sunny.sh clientid xxxxx &)
  1. disown

If a process already running

  • disown -h make a job ignore signal
  • disown -ah make all jobs ignore signal
  • disown -rh make running jobs ignore signal

if job already run in background

1
2
3
4
5
6
7
./sunny.sh clientid xxxxx &

[1]+ Running ./sunny.sh clientid xxxxx &

disown -h %1

ps -ef |grep sunny

if job already run in front

1
2
3
4
5
6
7
8
9
10
11
12
./sunny.sh clientid xxxxx

# [ctrl + z]
[1]+ Stopped ./sunny.sh clientid xxxxx

# put to background
bg %1
[1]+ Running ./sunny.sh clientid xxxxx &

disown -h %1

ps -ef |grep sunny

Java memory anaylsis

Posted on 2017-08-22 | Edited on 2018-12-16 | In java

Machine addresses

Many data structures involve representation of machine addresses.

The typical x64 architectures we widely used, need 8 bytes to represent address.

Many older machines use a 32-bit architecture that
would involve just 4 bytes per machine address.

Primary type

type bytes
byte 1
boolean 1
char 2
int 4
float 4
double 8
long 8

Object

Object

The overhead includes:

  • a reference to the object’s class
  • garbage collection information
  • synchronization information

List

LinkedList:

Object

Instance :

  • 16 bytes of object overhead
  • 8 bytes for first node reference
  • 4 bytes counter
  • 4 bytes padding

Node :

  • 16 bytes of object overhead
  • 8 bytes for the references to the Item
  • 8 bytes for the next to the Item
  • 8 bytes for the extra overhead

total :
32 + (40 + size(Object)) * N

Array

Object

type bytes
int[N] 24 + 4N
Integer[N] 24 + 32N
int[M][N] 24 + 8 * (24 + 4N)
Integer[M][N] 24 + 8 * (24 + 32N)

String

1
2
3
4
5
6
7
8
9
10
11
12
13
14
// 16 bytes
public class String {
// reference 8 bytes
// 24 + 2N bytes
private char[] value;
// 4 bytes
private int offset;
// 4 bytes
private int count;
// 4 bytes
private int hash;
...
// padding 4 bytes
}

Total : 64 + 2N


HashMap

Object
array length : M
average node size : N
Total[structure] : 64M + 48N


Tree

Object
Total[structure] : 56N

python edit file

Posted on 2017-08-21 | Edited on 2018-12-16 | In python
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
import re
import os
import os.path

dirpath = 'F:\\Users'
pattern = '</head>'
css = '<link rel="stylesheet" href="./style.css"></head>'

for file in os.listdir(dirpath):
# read all text
origin = open(os.path.join(dirpath, file), 'r+').readlines()
# edit target line
origin[0] = re.compile(pattern).sub(css, origin[0])
# rewrite whole file
open(os.path.join(dirpath, file), 'w+').writelines(origin)
  1. os.listdir(dir) lists a single node's sub-directories
  2. os.path.join(a, b) combines two path strings
  3. re.compile(pattern).sub(result, src) replaces a string

Tomcat static resources

Posted on 2017-08-20 | Edited on 2018-12-16 | In java

Config

$CATALINA_HOME\conf\web.conf

1
2
3
4
5
6
7
8
9
10
11
12
13
<servlet>
<servlet-name>default</servlet-name>
<servlet-class>org.apache.catalina.servlets.DefaultServlet</servlet-class>
<init-param>
<param-name>debug</param-name>
<param-value>0</param-value>
</init-param>
<init-param>
<param-name>listings</param-name>
<param-value>false</param-value>
</init-param>
<load-on-startup>1</load-on-startup>
</servlet>

When param listings is true, / can list all files

1
2
3
4
5
<!-- The mapping for the default servlet -->
<servlet-mapping>
<servlet-name>default</servlet-name>
<url-pattern>/</url-pattern>
</servlet-mapping>

This mapping makes the default servlet handle all requests that match no other servlet-mapping.

Process

1
2
3
4
5
6
protected void doGet(HttpServletRequest request,
HttpServletResponse response)
throws IOException, ServletException {
// Serve the requested resource, including the data content
serveResource(request, response, true, fileEncoding);
}
  1. judge resource existence
  2. judge file readable
  3. set response head content-type base on file type
  4. judge file time
  5. set response timeout
  6. stream operation

tomcat

python basic 1

Posted on 2017-08-19 | Edited on 2018-12-16 | In python

Replace String

  1. replace function

    1
    2
    a = 'hello word'
    b = a.replace('word','python')
  2. regex

    1
    2
    3
    4
    import re
    a = 'hello word'
    pattern = re.compile('word')
    b = pattern.sub('python',a)

Change line inside

  1. three single quotes '''
  2. three double quotes """
  3. one backslash \

Thread sleep

1
2
import time
time.sleep(7)

Real time print

1
2
3
4
5
import time
import sys
print 'hello word'
sys.stdout.flush()
time.sleep(7)

Basic function

type transfer

  • int()

    1
    2
    int('7')
    int('17', 16)
  • str()

    1
    2
    str(7)
    hex(17)

string

  1. remove blank / special characters
    s.strip().lstrip().rstrip(',')

  2. copy

    1
    2
    3
    4
    5
    #strcpy(sStr1,sStr2)
    sStr1 = 'strcpy'
    sStr2 = sStr1
    sStr1 = 'strcpy2'
    print sStr2
  3. concat

    1
    2
    3
    4
    5
    #strcat(sStr1,sStr2)
    sStr1 = 'strcat'
    sStr2 = 'append'
    sStr1 += sStr2
    print sStr1
  4. find

    1
    2
    3
    4
    5
    6
    #strchr(sStr1,sStr2)
    # < 0 not found
    sStr1 = 'strchr'
    sStr2 = 's'
    nPos = sStr1.index(sStr2)
    print nPos
  5. compare

    1
    2
    3
    4
    #strcmp(sStr1,sStr2)
    sStr1 = 'strchr'
    sStr2 = 'strch'
    print cmp(sStr1,sStr2)
  6. scan 4 contain

    1
    2
    3
    4
    5
    #strspn(sStr1,sStr2)
    sStr1 = '12345678'
    sStr2 = '456'
    #sStr1 and chars both in sStr1 and sStr2
    print len(sStr1 and sStr2)
  7. length

    1
    2
    3
    #strlen(sStr1)
    sStr1 = 'strlen'
    print len(sStr1)
  8. case

    1
    2
    3
    4
    5
    #strlwr(sStr1)
    sStr1 = 'JCstrlwr'
    sStr1 = sStr1.upper()
    #sStr1 = sStr1.lower()
    print sStr1
  9. add pointed string

    1
    2
    3
    4
    5
    6
    #strncat(sStr1,sStr2,n)
    sStr1 = '12345'
    sStr2 = 'abcdef'
    n = 3
    sStr1 += sStr2[0:n]
    print sStr1
  10. compare pointed string

    1
    2
    3
    4
    5
    #strncmp(sStr1,sStr2,n)
    sStr1 = '12345'
    sStr2 = '123bc'
    n = 3
    print cmp(sStr1[0:n],sStr2[0:n])
  11. copy pointed string

    1
    2
    3
    4
    5
    6
    #strncpy(sStr1,sStr2,n)
    sStr1 = ''
    sStr2 = '12345'
    n = 3
    sStr1 = sStr2[0:n]
    print sStr1
  12. replace pointed string

    1
    2
    3
    4
    5
    6
    #strnset(sStr1,ch,n)
    sStr1 = '12345'
    ch = 'r'
    n = 3
    sStr1 = n * ch + sStr1[3:]
    print sStr1
  13. scan string

    1
    2
    3
    4
    5
    6
    7
    8
    9
    #strpbrk(sStr1,sStr2)
    sStr1 = 'cekjgdklab'
    sStr2 = 'gka'
    nPos = -1
    for c in sStr1:
    if c in sStr2:
    nPos = sStr1.index(c)
    break
    print nPos
  14. reverse string

    1
    2
    3
    4
    #strrev(sStr1)
    sStr1 = 'abcdefg'
    sStr1 = sStr1[::-1]
    print sStr1
  15. split string

    1
    2
    3
    4
    5
    6
    7
    8
    #strtok(sStr1,sStr2)
    sStr1 = 'ab,cde,fgh,ijk'
    sStr2 = ','
    sStr1 = sStr1[sStr1.find(sStr2) + 1:]
    print sStr1
    #or
    s = 'ab,cde,fgh,ijk'
    print(s.split(','))
  16. connect string

    1
    2
    3
    delimiter = ','
    mylist = ['Brazil', 'Russia', 'India', 'China']
    print delimiter.join(mylist)
1…161718…29

Leon

282 posts
20 categories
58 tags
GitHub
Links
  • clock
  • typing-cn
  • mathjax
  • katex
  • cron
  • dos
  • keyboard
  • regex
  • sql
  • toy
© 2017 – 2024 Leon
Powered by Hexo v3.9.0
|
Theme – NexT.Muse v7.1.2