diff --git a/README.md b/README.md
index 64dece33ea1b020705fbef4e15989bb963b72116..5058a7abca4be83cf97663ec1e2c56099c5d818f 100644
--- a/README.md
+++ b/README.md
@@ -1,9 +1,9 @@
 # headless-chrome-crawler
-Headless Chrome crawler for Node.js Powered by [Puppeteer](https://github.com/GoogleChrome/puppeteer)
+Headless Chrome Crawler for Node.js, powered by [Puppeteer](https://github.com/GoogleChrome/puppeteer)
 
 ## Features
 
-Crawlers based on simple requests to html files are generally fast. However, it sometimes end up just capturing empty bodies, especially when the websites are built on such modern frontend frameworks as AngularJS, ReactJS and Vue.js.
+Crawlers based on simple requests to html files are generally fast. However, they sometimes end up capturing empty bodies, especially when the websites are built on such modern frontend frameworks as AngularJS, ReactJS and Vue.js.
 
 Powered by [Puppeteer](https://github.com/GoogleChrome/puppeteer), headless-chrome-crawler allows you to scrape those single page applications with the following features:
 
@@ -21,18 +21,19 @@ Powered by [Puppeteer](https://github.com/GoogleChrome/puppeteer), headless-chro
 
 ```
 yarn add headless-chrome-crawler
+# or "npm i headless-chrome-crawler"
 ```
 
-> **Note**: headless-chrome-crawler is powered by [Puppeteer](https://github.com/GoogleChrome/puppeteer). With installation, it automatically downloads a recent version of Chromium. To skip the download, see [Environment variables](https://github.com/GoogleChrome/puppeteer/blob/master/docs/api.md#environment-variables).
+> **Note**: headless-chrome-crawler is powered by [Puppeteer](https://github.com/GoogleChrome/puppeteer). During installation, it automatically downloads a recent version of Chromium. To skip the download, see [Environment variables](https://github.com/GoogleChrome/puppeteer/blob/master/docs/api.md#environment-variables).
 
 ### Usage
 
-The API of headless-chrome-crawler is inspired by that of [node-crawler](https://github.com/bda-research/node-crawler), so the API design is very similar but not exactly compatible.
+The basic API of headless-chrome-crawler is inspired by that of [node-crawler](https://github.com/bda-research/node-crawler), so the API design is somewhat similar but not exactly compatible.
 
 ```js
 const HCCrawler = require('headless-chrome-crawler');
 
-const hccrawler = new HCCrawler({
+HCCrawler.launch({
   evaluatePage: (() => ({
     title: $('title').text(),
     h1: $('h1').text(),
@@ -41,13 +42,14 @@ const hccrawler = new HCCrawler({
   onSuccess: (result => {
     console.log('onSuccess', result); // resolves status, options and evaluated result.
   }),
-});
-
-hccrawler.launch()
-  .then(() => {
-    hccrawler.queue('https://example.com');
-    hccrawler.queue(['https://example.net', 'https://example.org']);
-    hccrawler.queue({
+})
+  .then(crawler => {
+    // Queue a single request
+    crawler.queue('https://example.com');
+    // Queue multiple requests
+    crawler.queue(['https://example.net', 'https://example.org']);
+    // Queue a request with custom options
+    crawler.queue({
       jQuery: false,
       url: 'https://example.com',
       evaluatePage: (() => ({
@@ -56,9 +58,10 @@ hccrawler.launch()
         p: document.getElementsByTagName('p')[0].innerText
       })),
     });
-    return hccrawler.onIdle();
-  })
-  .then(() => hccrawler.close());
+    // Called when no queue is left
+    crawler.onIdle()
+      .then(() => crawler.close());
+  });
 ```
 
 ## Examples
@@ -70,89 +73,112 @@ See [here](https://github.com/yujiosaka/headless-chrome-crawler/tree/master/exam
 ### Table of Contents
 
 * [class: HCCrawler](#class-hccrawler)
-  * [hccrawler.launch([options])](#hccrawlerlaunchoptions)
-  * [hccrawler.queue([options])](#hccrawlerqueueoptions)
-  * [hccrawler.onIdle()](#hccrawleronidle)
-  * [hccrawler.close()](#hccrawlerclose)
-  * [hccrawler.queuesize](#hccrawlerqueuesize)
+  * [HCCrawler.connect([options])](#hccrawlerconnectoptions)
+  * [HCCrawler.launch([options])](#hccrawlerlaunchoptions)
+* [class: Crawler](#class-crawler)
+  * [crawler.queue([options])](#crawlerqueueoptions)
+  * [crawler.close()](#crawlerclose)
+  * [crawler.onIdle()](#crawleronidle)
+  * [crawler.queueSize](#crawlerqueuesize)
 
 ### class: HCCrawler
 
-You can pass the following options to the constructor.
-Concurrency can only be set in the constructor, but other options can be overridden by each [hccrawler.queue](#hccrawlerqueueoptions)'s options
+HCCrawler provides a method to launch a crawler. It extends [Puppeteer class](https://github.com/GoogleChrome/puppeteer/blob/master/docs/api.md#class-puppeteer), so any methods like `HCCrawler.executablePath()` are available.
+
+#### HCCrawler.connect([options])
+
+* `options` <[Object]>
+  * `concurrency` <[number]> Maximum number of pages to open concurrently, defaults to `10`.
+* returns: <Promise<Crawler>> Promise which resolves to Crawler instance.
+
+This method connects to an existing Chromium instance. The following options are passed straight to [Puppeteer.connect API](https://github.com/GoogleChrome/puppeteer/blob/master/docs/api.md#puppeteerconnectoptions).
+
+```
+browserWSEndpoint, ignoreHTTPSErrors
+```
+
+Also, the following options can be set as default values when [crawler.queue([options])](#crawlerqueueoptions) is executed.
+
+```
+url, timeout, priority, delay, retryCount, retryDelay, jQuery, device, username, password, shouldRequest, evaluatePage, onSuccess, onError
+```
+
+> **Note**: In practice, setting the options every time you queue the requests is not only redundant but also slow. Therefore, it's recommended to set the default values and override them depending on the necessity.
+
+#### HCCrawler.launch([options])
+
+* `options` <[Object]>
+  * `concurrency` <[number]> Maximum number of pages to open concurrently, defaults to `10`.
+* returns: <Promise<Crawler>> Promise which resolves to Crawler instance.
+
+The method launches a Chromium instance. The following options are passed straight to [Puppeteer.launch API](https://github.com/GoogleChrome/puppeteer/blob/master/docs/api.md#puppeteerlaunchoptions).
+
+```
+ignoreHTTPSErrors, headless, executablePath, slowMo, args, handleSIGINT, handleSIGTERM, handleSIGHUP, timeout, dumpio, userDataDir, env, devtools
+```
+
+Also, the following options can be set as default values when [crawler.queue([options])](#crawlerqueueoptions) is executed.
+
+```
+url, timeout, priority, delay, retryCount, retryDelay, jQuery, device, username, password, shouldRequest, evaluatePage, onSuccess, onError
+```
+
+> **Note**: In practice, setting the options every time you queue the requests is not only redundant but also slow. Therefore, it's recommended to set the default values and override them depending on the necessity.
+
+### class: Crawler
+
+HCCrawler provides a method to queue a request. It extends [Puppeteer's Browser class](https://github.com/GoogleChrome/puppeteer/blob/master/docs/api.md#class-browser), so any methods like `crawler.close()` are available.
+
+#### crawler.queue([options])
 
 * `options` <[Object]>
   * `url` <[String]> Url to navigate to. The url should include scheme, e.g. `https://`.
-  * `timeout` <[number]> Maximum navigation time in milliseconds, defaults to `30`, pass `0` to disable timeout.
-  * `waitUntil` <string|Array<string>> When to consider navigation succeeded, defaults to `load`. Given an array of event strings, navigation is considered to be successful after all events have been fired. Events can be either:
-    * `load` - consider navigation to be finished when the `load` event is fired.
-    * `domcontentloaded` - consider navigation to be finished when the `DOMContentLoaded` event is fired.
-    * `networkidle0` - consider navigation to be finished when there are no more than `0` network connections for at least `500` ms.
-    * `networkidle2` - consider navigation to be finished when there are no more than `2` network connections for at least `500` ms.
-  * `concurrency` <[number]> Number of pages to work concurrently, defaults to `10`.
   * `priority` <[number]> Basic priority of queues, defaults to `1`. Queues with larger priorities are preferred.
   * `delay` <[number]> Number of milliseconds after each request, defaults to `0`. When delay is set, concurrency must be `1`.
   * `retryCount` <[number]> Number of limit when retry fails, defaults to `3`.
   * `retryDelay` <[number]> Number of milliseconds after each retry fails, defaults to `10000`.
   * `jQuery` <[boolean]> Whether to automatically add jQuery tag to page, defaults to `true`.
-  * `captureConsole` <[boolean]> Whether to capture browser's console. Useful for debugging, defaults to `false`.
   * `device` <[String]> Device to emulate. Available devices are listed [here](https://github.com/GoogleChrome/puppeteer/blob/master/DeviceDescriptors.js).
   * `username` <[String]> Username required for Basic Authentication. pass `null` if it's not necessary.
   * `password` <[String]> Password required for Basic Authentication. pass `null` if it's not necessary.
-  * `shouldRequest(options)` <[Function]> Return `false` if you want to skip the request. Useful for skipping duplicates.
-    * `options` <[Object]> Options merged with hccrawler.queue's options.
+  * `preRequest(options)` <[Function]> Function to do anything like waiting and modifying options before each request. You can also return `false` if you want to skip the request.
+    * `options` <[Object]> [crawler.queue([options])](#crawlerqueueoptions)'s options with default values.
   * `evaluatePage()` <[Function]> Function to be evaluated in browsers. Return serializable object. If it's not serializable, the result will be `undefined`.
   * `onSuccess(response)` <[Function]> Function to be called when `evaluatePage()` successes.
     * `response` <[Object]>
       * `status` <[String]> status code of the request.
-      * `options` <[Object]> Options merged with hccrawler.queue's options.
+      * `options` <[Object]> [crawler.queue([options])](#crawlerqueueoptions)'s options with default values.
       * `result` <[Serializable]> The result resolved from `evaluatePage()`.
-  * `onError(err)` <[Function]> Function to be called when request fails.
-    * `err` <[Error]> Error object.
-
-> **Note**: `url`, `timeout` are `waitUntil` options are passed to [Puppeteer](https://github.com/GoogleChrome/puppeteer). For updated information, see [Puppeteer's page.goto(url, options) API](https://github.com/GoogleChrome/puppeteer/blob/master/docs/api.md#pagegotourl-options)
-
-#### hccrawler.launch([options])
-
-The options are passed straight to [Puppeteer.launch API](https://github.com/GoogleChrome/puppeteer/blob/master/docs/api.md#puppeteerlaunchoptions).
-Following options may be useful for debugging.
+  * `onError(error)` <[Function]> Function to be called when request fails.
+    * `error` <[Error]> Error object.
 
-- `options` <[Object]>
-  - `headless` <[boolean]> Whether to run Chromium in [headless mode](https://developers.google.com/web/updates/2017/04/headless-chrome), defaults to `true` unless the `devtools` option is `true`.
-  - `slowMo` <[number]> Slows down Puppeteer operations by the specified amount of milliseconds. Useful so that you can see what is going on.
+The options can be either an object, an array, or a string. When it's an array, each item in the array will be executed. When it's a string, the options are transformed to an object with only url defined.
 
-#### hccrawler.queue([options])
-
-Options can be either an array or an object.
-All options are common with HCCrawler's constructor options except that `concurrency` option cannot be set in `hccrawler.queue`.
-When both defined, hccrawler.queue's options are always preferred.
-
-#### hccrawler.onIdle()
+#### crawler.close()
 
-- returns: <[Promise]> Promise is chained when queues become empty.
+- returns: <[Promise]> Promise which is resolved when the browser is closed.
 
-#### hccrawler.close()
+#### crawler.onIdle()
 
-- returns: <[Promise]> Promise is chained when ther browser is successfully closed.
+- returns: <[Promise]> Promise which is resolved when queues become empty.
 
-#### hccrawler.queueSize
+#### crawler.queueSize
 
 * returns: <[number]> The size of queues. This property is read only.
 
 ## Debugging tips
 
-### Puppeteer.launch's options
+### Launch options
 
-[hccrawler.launch](#chcrawlerlaunchoptions)'s options are passed straight to [Puppeteer.launch API](https://github.com/GoogleChrome/puppeteer/blob/master/docs/api.md#puppeteerlaunchoptions).
-It may be useful to set the `headless` and `slowMo` options so that you can see what is going on.
+[HCCrawler.launch([options])](#hccrawlerlaunchoptions)'s options are passed straight to [Puppeteer.launch API](https://github.com/GoogleChrome/puppeteer/blob/master/docs/api.md#puppeteerlaunchoptions). It may be useful to set the `headless` and `slowMo` options so that you can see what is going on.
 
 ```js
-hccrawler.launch({ headless: false, slowMo: 10 });
+HCCrawler.launch({ headless: false, slowMo: 10 });
 ```
 
 ### Enable debug logging
 
-All requests and browser's logs are logged via the [debug]'(https://github.com/visionmedia/debug)' module under the `hccrawler` namespace.
+All requests and browser's logs are logged via the [debug](https://github.com/visionmedia/debug) module under the `hccrawler` namespace.
 
 ```
 env DEBUG="hccrawler:*" node script.js
diff --git a/examples/capture-console.js b/examples/capture-console.js
deleted file mode 100644
index a0ca7d1c51edeb1d624a7a345e58f71e8e86c3a3..0000000000000000000000000000000000000000
--- a/examples/capture-console.js
+++ /dev/null
@@ -1,22 +0,0 @@
-const HCCrawler = require('../lib/hccrawler');
-
-const hccrawler = new HCCrawler({
-  captureConsole: true,
-  evaluatePage: (() => {
-    const $elem = $('p');
-    console.error('p length', $elem.length);
-    return $elem.text();
-  }),
-  onSuccess: (result => {
-    console.log('onSuccess', result);
-  }),
-});
-
-hccrawler.launch()
-  .then(() => {
-    hccrawler.queue('https://example.com');
-    return hccrawler.onIdle();
-  })
-  .then(() => {
-    hccrawler.close();
-  });
diff --git a/examples/delay.js b/examples/delay.js
index c81f887f9d8797607ae1bd75c0098855ca9ae076..60c3908b9d4c1bd1ba248e72466a3d50e7e66cf7 100644
--- a/examples/delay.js
+++ b/examples/delay.js
@@ -1,6 +1,6 @@
-const HCCrawler = require('../lib/hccrawler');
+const HCCrawler = require('../');
 
-const hccrawler = new HCCrawler({
+HCCrawler.launch({
   concurrency: 1, // Concurrency must be 1 when delay is set
   delay: 2000, // Delay 2000 millisecnds before each request is sent
   evaluatePage: (() => ({
@@ -11,15 +11,11 @@ const hccrawler = new HCCrawler({
   onSuccess: (result => {
     console.log('onSuccess', result);
   }),
-});
-
-hccrawler.launch()
-  .then(() => {
-    hccrawler.queue({ url: 'https://example.com' });
-    hccrawler.queue({ url: 'https://example.net' });
-    hccrawler.queue({ url: 'https://example.org' });
-    return hccrawler.onIdle();
-  })
-  .then(() => {
-    hccrawler.close();
+})
+  .then(crawler => {
+    crawler.queue({ url: 'https://example.com' });
+    crawler.queue({ url: 'https://example.net' });
+    crawler.queue({ url: 'https://example.org' });
+    crawler.onIdle()
+      .then(() => crawler.close());
   });
diff --git a/examples/disable-jquery.js b/examples/disable-jquery.js
index 449b2eedd30bbd3b6880c9114f469a0a956cd988..498b5b6aafafed613162e7e655ec9a0e63a1ee39 100644
--- a/examples/disable-jquery.js
+++ b/examples/disable-jquery.js
@@ -1,6 +1,6 @@
-const HCCrawler = require('../lib/hccrawler');
+const HCCrawler = require('../');
 
-const hccrawler = new HCCrawler({
+HCCrawler.launch({
   jQuery: false, // jQuery script tag won't be added
   retryCount: 3, // Retry the same request up to 3 times
   retryDelay: 1000, // Wait 1000msecs before each retry
@@ -16,16 +16,9 @@ const hccrawler = new HCCrawler({
   onError: (err => {
     console.error('onError', err);
   }),
-});
-
-hccrawler.launch()
-  .then(() => {
-    hccrawler.queue('https://example.com');
-    return hccrawler.onIdle();
-  })
-  .catch(err => {
-    console.error(err);
-  })
-  .then(() => {
-    hccrawler.close();
+})
+  .then(crawler => {
+    crawler.queue('https://example.com');
+    crawler.onIdle()
+      .then(() => crawler.close());
   });
diff --git a/examples/emulate-device.js b/examples/emulate-device.js
index 6d3955f261e26613b304a24e96f75b24b1be4d2b..1c07a81db1b9b542155310ec745a12a1293b3633 100644
--- a/examples/emulate-device.js
+++ b/examples/emulate-device.js
@@ -1,6 +1,6 @@
-const HCCrawler = require('../lib/hccrawler');
+const HCCrawler = require('../');
 
-const hccrawler = new HCCrawler({
+HCCrawler.launch({
   evaluatePage: (() => ({
     title: $('title').text(),
     h1: $('h1').text(),
@@ -9,15 +9,11 @@ const hccrawler = new HCCrawler({
   onSuccess: (result => {
     console.log('onSuccess', result);
   }),
-});
-
-hccrawler.launch()
-  .then(() => {
-    hccrawler.queue({ url: 'https://example.com', device: 'iPhone 6 Plus' });
-    hccrawler.queue({ url: 'https://example.com', device: 'iPad' });
-    hccrawler.queue({ url: 'https://example.com', device: 'Nexus 7' });
-    return hccrawler.onIdle();
-  })
-  .then(() => {
-    hccrawler.close();
+})
+  .then(crawler => {
+    crawler.queue({ url: 'https://example.com', device: 'iPhone 6 Plus' });
+    crawler.queue({ url: 'https://example.com', device: 'iPad' });
+    crawler.queue({ url: 'https://example.com', device: 'Nexus 7' });
+    crawler.onIdle()
+      .then(() => crawler.close());
   });
diff --git a/examples/multiple-queue.js b/examples/multiple-queue.js
index 10746cf0129ace2a59da752936734c7f6eb94ff4..456a8c9f07b9330f3ebd529779b67d6871d0e794 100644
--- a/examples/multiple-queue.js
+++ b/examples/multiple-queue.js
@@ -1,6 +1,6 @@
-const HCCrawler = require('../lib/hccrawler');
+const HCCrawler = require('../');
 
-const hccrawler = new HCCrawler({
+HCCrawler.launch({
   evaluatePage: (() => ({
     title: $('title').text(),
     h1: $('h1').text(),
@@ -9,14 +9,10 @@ const hccrawler = new HCCrawler({
   onSuccess: (result => {
     console.log('onSuccess', result);
   }),
-});
-
-hccrawler.launch()
-  .then(() => {
-    hccrawler.queue('https://example.com'); // one URL
-    hccrawler.queue(['https://example.net', { url: 'https://example.org' }]); // multiple URLs in different styles.
-    return hccrawler.onIdle();
-  })
-  .then(() => {
-    hccrawler.close();
+})
+  .then(crawler => {
+    crawler.queue('https://example.com'); // one URL
+    crawler.queue(['https://example.net', { url: 'https://example.org' }]); // multiple URLs in different styles.
+    crawler.onIdle()
+      .then(() => crawler.close());
   });
diff --git a/examples/override-function.js b/examples/override-function.js
index d128f1b3bc4d4bc365826f76461c3f05b9ae5905..61439b825956e3e0f89d9e2e75cfd595298b579c 100644
--- a/examples/override-function.js
+++ b/examples/override-function.js
@@ -1,6 +1,6 @@
-const HCCrawler = require('../lib/hccrawler');
+const HCCrawler = require('../');
 
-const hccrawler = new HCCrawler({
+HCCrawler.launch({
   // Global functions won't be called
   evaluatePage: (() => {
     throw new Error('Evaluate page function is not overriden!');
@@ -8,11 +8,9 @@ const hccrawler = new HCCrawler({
   onSuccess: (() => {
     throw new Error('On sucess function is not overriden!');
   }),
-});
-
-hccrawler.launch()
-  .then(() => {
-    hccrawler.queue({
+})
+  .then(crawler => {
+    crawler.queue({
       url: 'https://example.com',
       evaluatePage: (() => ({
         title: $('title').text(),
@@ -23,8 +21,6 @@ hccrawler.launch()
         console.log('onSuccess', result);
       }),
     });
-    return hccrawler.onIdle();
-  })
-  .then(() => {
-    hccrawler.close();
+    crawler.onIdle()
+      .then(() => crawler.close());
   });
diff --git a/examples/priority-queue.js b/examples/priority-queue.js
index 58033d1edf7cdd191f4c523f69b56a19363486eb..63dec69eb7bc365520f9fc02169a71163bd3868e 100644
--- a/examples/priority-queue.js
+++ b/examples/priority-queue.js
@@ -1,6 +1,6 @@
-const HCCrawler = require('../lib/hccrawler');
+const HCCrawler = require('../');
 
-const hccrawler = new HCCrawler({
+HCCrawler.launch({
   concurrency: 1,
   evaluatePage: (() => ({
     title: $('title').text(),
@@ -10,15 +10,11 @@ const hccrawler = new HCCrawler({
   onSuccess: (result => {
     console.log('onSuccess', result);
   }),
-});
-
-hccrawler.launch()
-  .then(() => {
-    hccrawler.queue({ url: 'https://example.com' }); // First queue will be requested first regardless of priority
-    hccrawler.queue({ url: 'https://example.net', priority: 1 });
-    hccrawler.queue({ url: 'https://example.org', priority: 2 }); // This queue is requested before the previous queue
-    return hccrawler.onIdle();
-  })
-  .then(() => {
-    hccrawler.close();
+})
+  .then(crawler => {
+    crawler.queue({ url: 'https://example.com' }); // First queue will be requested first regardless of priority
+    crawler.queue({ url: 'https://example.net', priority: 1 });
+    crawler.queue({ url: 'https://example.org', priority: 2 }); // This queue is requested before the previous queue
+    crawler.onIdle()
+      .then(() => crawler.close());
   });
diff --git a/examples/skip-duplicates.js b/examples/skip-duplicates.js
index eb5d88b4dbddd5c708d04b0fc3d6a2719b2c0df6..da190dd30b0922fffb8723a687c387708221c467 100644
--- a/examples/skip-duplicates.js
+++ b/examples/skip-duplicates.js
@@ -1,7 +1,8 @@
-const HCCrawler = require('../lib/hccrawler');
+const HCCrawler = require('../');
 
 const requestedObj = {};
-const hccrawler = new HCCrawler({
+
+HCCrawler.launch({
   concurrency: 1,
   evaluatePage: (() => ({
     title: $('title').text(),
@@ -12,20 +13,15 @@ const hccrawler = new HCCrawler({
     requestedObj[result.options.url] = true;
     console.log('onSuccess', result);
   }),
-  shouldRequest: (options => {
+  preRequest: (options => {
     if (requestedObj[options.url]) return false;
     return true;
   }),
-});
-
-hccrawler.launch()
-  .then(() => {
-    hccrawler.queue('https://example.com');
-    hccrawler.queue('https://example.net');
-    hccrawler.queue('https://example.org');
-    hccrawler.queue('https://example.com'); // The queue won't be requested
-    return hccrawler.onIdle();
-  })
-  .then(() => {
-    hccrawler.close();
+})
+  .then(crawler => {
+    crawler.queue('https://example.com');
+    crawler.queue('https://example.net');
+    crawler.queue('https://example.com'); // The queue won't be requested
+    crawler.onIdle()
+      .then(() => crawler.close());
   });
diff --git a/lib/crawler.js b/lib/crawler.js
new file mode 100644
index 0000000000000000000000000000000000000000..c0616af5fa17a64c90f6eb40e16d0e1fb8755a14
--- /dev/null
+++ b/lib/crawler.js
@@ -0,0 +1,102 @@
+const _ = require('lodash');
+const PQueue = require('p-queue');
+const devices = require('puppeteer/DeviceDescriptors');
+const debugBrowser = require('debug')('hccrawler:browser');
+const debugRequest = require('debug')('hccrawler:request');
+const { delay } = require('./helper');
+
+const deviceNames = Object.keys(devices);
+const jQueryPath = require.resolve('jQuery');
+
+class Browser {
+  constructor(browser, options) {
+    this.browser = browser;
+    this.options = _.extend({
+      concurrency: 10,
+      priority: 1,
+      delay: 0,
+      retryCount: 3,
+      retryDelay: 10000,
+      jQuery: true,
+    }, options);
+    this._pQueue = new PQueue({
+      concurrency: this.options.concurrency,
+    });
+  }
+
+  queue(options) {
+    _.each(_.isArray(options) ? options : [options], _options => {
+      let mergedOptions = _.isString(_options) ? { url: _options } : _options;
+      mergedOptions = _.extend({}, this.options, mergedOptions);
+      this._validateOptions(mergedOptions);
+      this._pQueue.add(() => this._request(mergedOptions), {
+        priority: mergedOptions.priority,
+      });
+    });
+  }
+
+  _validateOptions(options) {
+    if (!options.url) throw new Error('Url must be defined!');
+    if (!options.evaluatePage) throw new Error('Evaluate page function must be defined!');
+    if (!options.onSuccess) throw new Error('On success function must be defined!');
+    if (options.device && !_.includes(deviceNames, options.device)) throw new Error('Specified device is not supported!');
+    if (options.delay > 0 && options.concurrency !== 1) throw new Error('Concurrency must be 1 when delay is set!');
+  }
+
+  _request(options, retryCount = 0) {
+    if (retryCount === 0) debugRequest(`Start requesting ${options.url}`);
+    return Promise.resolve(options.preRequest ? options.preRequest(options) : true)
+      .then(shouldRequest => {
+        if (!shouldRequest) {
+          debugRequest(`Skip requesting ${options.url}`);
+          return Promise.resolve();
+        }
+        return this.browser.newPage()
+          .then(page => {
+            page.on('console', (msg => void debugBrowser(msg.text)));
+            const credentials = _.pick(options, ['username', 'password']);
+            if (options.username || options.password) page.authenticate(credentials);
+            const emulate = options.device
+              ? page.emulate(devices[options.device])
+              : Promise.resolve();
+            return emulate.then(() => page.goto(options.url, _.pick(options, ['timeout', 'waitUntil'])))
+              .then(res => {
+                debugRequest(`Opened page for ${options.url}`);
+                const addScriptTag = options.jQuery
+                  ? page.addScriptTag({ path: jQueryPath })
+                  : Promise.resolve();
+                return addScriptTag.then(() => page.evaluate(options.evaluatePage))
+                  .then(result => options.onSuccess({ status: res.status, options, result }))
+                  .then(() => void debugRequest(`End requesting ${options.url}`))
+                  .then(() => page.close())
+                  .then(() => void debugRequest(`Closed page for ${options.url}`))
+                  .then(() => delay(options.delay));
+              });
+          });
+      })
+      .catch(err => {
+        if (retryCount >= options.retryCount) throw new Error(`Retry give-up for requesting ${options.url}!`, err);
+        debugRequest(`Retry requesting ${options.url} ${retryCount + 1} times`);
+        return delay(options.retryDelay).then(() => this._request(options, retryCount + 1));
+      })
+      .catch(err => {
+        debugRequest(`Retry give-up for requesting ${options.url} after ${retryCount} tries`);
+        const onError = options.onError || _.noop;
+        return onError(err);
+      });
+  }
+
+  close() {
+    return this.browser.close();
+  }
+
+  onIdle() {
+    return this._pQueue.onIdle();
+  }
+
+  get queueSize() {
+    return this._pQueue.size + 1;
+  }
+}
+
+module.exports = Browser;
diff --git a/lib/hccrawler.js b/lib/hccrawler.js
index 9ef78a5bf9fbf79ac3b500840da35d9bd4a6aa5d..ac9b677b50557d9c96f32943c1ca49ee6f1f128a 100644
--- a/lib/hccrawler.js
+++ b/lib/hccrawler.js
@@ -1,110 +1,36 @@
 const _ = require('lodash');
-const PQueue = require('p-queue');
-const puppeteer = require('puppeteer');
-const devices = require('puppeteer/DeviceDescriptors');
-const debugBrowser = require('debug')('hccrawler:browser');
-const debugRequest = require('debug')('hccrawler:request');
-const { delay } = require('./helper');
-
-const deviceNames = Object.keys(devices);
-const jQueryPath = require.resolve('jQuery');
-
-class HCCrawler {
-  constructor(options) {
-    this.options = _.extend({
-      concurrency: 10,
-      priority: 1,
-      delay: 0,
-      retryCount: 3,
-      retryDelay: 10000,
-      jQuery: true,
-      captureConsole: false,
-    }, options);
-    this._pQueue = new PQueue({
-      concurrency: this.options.concurrency,
-    });
-  }
-
-  launch(options) {
-    return puppeteer.launch(options)
-      .then(browser => {
-        this.browser = browser;
-      });
-  }
-
-  queue(options) {
-    if (!this.browser) throw new Error('Browser is not launched yet!');
-    _.each(_.isArray(options) ? options : [options], _options => {
-      let mergedOptions = _.isString(_options) ? { url: _options } : _options;
-      mergedOptions = _.extend({}, this.options, mergedOptions);
-      this._validateOptions(mergedOptions);
-      this._pQueue.add(() => this._request(mergedOptions), {
-        priority: mergedOptions.priority,
-      });
-    });
-  }
-
-  _validateOptions(options) {
-    if (!options.url) throw new Error('Url must be defined!');
-    if (!options.evaluatePage) throw new Error('Evaluate page function must be defined!');
-    if (!options.onSuccess) throw new Error('On success function must be defined!');
-    if (options.device && !_.includes(deviceNames, options.device)) throw new Error('Specified device is not supported!');
-    if (options.delay > 0 && options.concurrency !== 1) throw new Error('Concurrency must be 1 when delay is set!');
-  }
-
-  _request(options, retryCount = 0) {
-    if (retryCount === 0) debugRequest(`Start requesting ${options.url}`);
-    return Promise.resolve(options.shouldRequest ? options.shouldRequest(options) : true)
-      .then(shouldRequest => {
-        if (!shouldRequest) {
-          debugRequest(`Skip requesting ${options.url}`);
-          return Promise.resolve();
-        }
-        return this.browser.newPage()
-          .then(page => {
-            page.on('console', (msg => void debugBrowser(msg.text)));
-            const credentials = _.pick(options, ['username', 'password']);
-            if (options.username || options.password) page.authenticate(credentials);
-            const emulate = options.device
-              ? page.emulate(devices[options.device])
-              : Promise.resolve();
-            return emulate.then(() => page.goto(options.url, _.pick(options, ['timeout', 'waitUntil'])))
-              .then(res => {
-                debugRequest(`Opened page for ${options.url}`);
-                const addScriptTag = options.jQuery
-                  ? page.addScriptTag({ path: jQueryPath })
-                  : Promise.resolve();
-                return addScriptTag.then(() => page.evaluate(options.evaluatePage))
-                  .then(result => options.onSuccess({ status: res.status, options, result }))
-                  .then(() => void debugRequest(`End requesting ${options.url}`))
-                  .then(() => page.close())
-                  .then(() => void debugRequest(`Closed page for ${options.url}`))
-                  .then(() => delay(options.delay));
-              });
-          });
-      })
-      .catch(err => {
-        if (retryCount >= options.retryCount) throw new Error(`Retry give-up for requesting ${options.url}!`, err);
-        debugRequest(`Retry requesting ${options.url} ${retryCount + 1} times`);
-        return delay(options.retryDelay).then(() => this._request(options, retryCount + 1));
-      })
-      .catch(err => {
-        debugRequest(`Retry give-up for requesting ${options.url} after ${retryCount} tries`);
-        const onError = options.onError || _.noop;
-        return onError(err);
-      });
-  }
-
-  close() {
-    return this.browser.close();
-  }
-
-  onIdle() {
-    return this._pQueue.onIdle();
-  }
-
-  get queueSize() {
-    return this._pQueue.size + 1;
+const Puppeteer = require('puppeteer');
+const Crawler = require('./crawler');
+
+const PUPPETEER_CONNECT_OPTIONS = [
+  'browserWSEndpoint',
+  'ignoreHTTPSErrors',
+];
+const PUPPETEER_LAUNCH_OPTIONS = [
+  'ignoreHTTPSErrors',
+  'headless',
+  'executablePath',
+  'slowMo',
+  'args',
+  'handleSIGINT',
+  'handleSIGTERM',
+  'handleSIGHUP',
+  'timeout',
+  'dumpio',
+  'userDataDir',
+  'env',
+  'devtools',
+];
+
+class HCCrawler extends Puppeteer {
+  static connect(options) {
+    return super.connect(_.pick(options, PUPPETEER_CONNECT_OPTIONS))
+      .then(browser => new Crawler(browser, _.omit(options, PUPPETEER_CONNECT_OPTIONS)));
+  }
+
+  static launch(options) {
+    return super.launch(_.pick(options, PUPPETEER_LAUNCH_OPTIONS))
+      .then(browser => new Crawler(browser, _.omit(options, PUPPETEER_LAUNCH_OPTIONS)));
   }
 }