diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..24e554cf --- /dev/null +++ b/.gitignore @@ -0,0 +1,8 @@ +a.out +build +node_modules +deps +.idea +core +.env +.vscode \ No newline at end of file diff --git a/README.md b/README.md index 2362c85d..dda7e0d5 100644 --- a/README.md +++ b/README.md @@ -1,88 +1,712 @@ -NAME ----- +# node-odbc 2.0.0 is now in beta! +A new version of `odbc` has been released in beta! The initial release of this beta can be found at [https://www.npmjs.com/package/odbc/v/2.0.0-beta.0](https://www.npmjs.com/package/odbc/v/2.0.0-beta.0), while later releases can be found under the Versions tab on npm. This URL contains information including the README file outlining the new API. + +To install the beta version, include the `beta` tag with your command: +``` +npm install odbc@beta +``` + +Test it out and give feedback on the issues of the official git repository! + +--- + +node-odbc +--------- + +An asynchronous/synchronous interface for node.js to unixODBC and its supported +drivers. + +requirements +------------ + +* unixODBC binaries and development libraries for module compilation + * on Ubuntu/Debian `sudo apt-get install unixodbc unixodbc-dev` + * on RedHat/CentOS `sudo yum install unixODBC unixODBC-devel` + * on OSX + * using macports.org `sudo port unixODBC` + * using brew `brew install unixODBC` + * on IBM i `yum install unixODBC unixODBC-devel` (requires [yum](http://ibm.biz/ibmi-rpms)) +* odbc drivers for target database +* properly configured odbc.ini and odbcinst.ini. + +install +------- + +After insuring that all requirements are installed you may install by one of the +two following options: + +### git + +```bash +git clone git://github.com/wankdanker/node-odbc.git +cd node-odbc +node-gyp configure build +``` +### npm + +```bash +npm install odbc +``` + +quick example +------------- + +```javascript +var db = require('odbc')() + , cn = process.env.ODBC_CONNECTION_STRING + ; + +db.open(cn, function (err) { + if (err) return console.log(err); + + db.query('select * from user where user_id = ?', [42], function (err, data) { + if (err) console.log(err); + + console.log(data); + + db.close(function () { + console.log('done'); + }); + }); +}); +``` + +api +--- + +### Database + +The simple api is based on instances of the `Database` class. You may get an +instance in one of the following ways: + +```javascript +require("odbc").open(connectionString, function (err, db){ + //db is already open now if err is falsy +}); +``` + +or by using the helper function: + +```javascript +var db = require("odbc")(); +``` + +or by creating an instance with the constructor function: + +```javascript +var Database = require("odbc").Database + , db = new Database(); +``` + +#### .connected + +Returns a Boolean of whether the database is currently connected. + +```javascript +var db = require("odbc")(); + +console.log( "Connected: " + db.connected ); +``` + +#### .open(connectionString, callback) + +Open a connection to a database. + +* **connectionString** - The ODBC connection string for your database +* **callback** - `callback (err)` + +```javascript +var db = require("odbc")() + , cn = "DRIVER={FreeTDS};SERVER=host;UID=user;PWD=password;DATABASE=dbname" + ; + +db.open(cn, function (err) { + if (err) { + return console.log(err); + } + + //we now have an open connection to the database +}); +``` +#### .openSync(connectionString) + +Synchronously open a connection to a database. 
+ +* **connectionString** - The ODBC connection string for your database + +```javascript +var db = require("odbc")() + , cn = "DRIVER={FreeTDS};SERVER=host;UID=user;PWD=password;DATABASE=dbname" + ; + +try { + var result = db.openSync(cn); +} +catch (e) { + console.log(e.message); +} + +//we now have an open connection to the database +``` + +#### .query(sqlQuery [, bindingParameters], callback) + +Issue an asynchronous SQL query to the database which is currently open. + +* **sqlQuery** - The SQL query to be executed. +* **bindingParameters** - _OPTIONAL_ - An array of values that will be bound to + any '?' characters in `sqlQuery`. +* **callback** - `callback (err, rows, moreResultSets)` + +```javascript +var db = require("odbc")() + , cn = "DRIVER={FreeTDS};SERVER=host;UID=user;PWD=password;DATABASE=dbname" + ; + +db.open(cn, function (err) { + if (err) { + return console.log(err); + } + + //we now have an open connection to the database + //so lets get some data + db.query("select top 10 * from customers", function (err, rows, moreResultSets) { + if (err) { + return console.log(err); + } + + console.log(rows); + + //if moreResultSets is truthy, then this callback function will be called + //again with the next set of rows. + }); +}); +``` + +#### .querySync(sqlQuery [, bindingParameters]) + +Synchronously issue a SQL query to the database that is currently open. + +* **sqlQuery** - The SQL query to be executed. +* **bindingParameters** - _OPTIONAL_ - An array of values that will be bound to + any '?' characters in `sqlQuery`. + +```javascript +var db = require("odbc")() + , cn = "DRIVER={FreeTDS};SERVER=host;UID=user;PWD=password;DATABASE=dbname" + ; + +//blocks until the connection is opened. +db.openSync(cn); + +//blocks until the query is completed and all data has been acquired +var rows = db.querySync("select top 10 * from customers"); + +console.log(rows); +``` + +#### .close(callback) + +Close the currently opened database. + +* **callback** - `callback (err)` + +```javascript +var db = require("odbc")() + , cn = "DRIVER={FreeTDS};SERVER=host;UID=user;PWD=password;DATABASE=dbname" + ; + +db.open(cn, function (err) { + if (err) { + return console.log(err); + } + + //we now have an open connection to the database + + db.close(function (err) { + console.log("the database connection is now closed"); + }); +}); +``` + +#### .closeSync() + +Synchronously close the currently opened database. + +```javascript +var db = require("odbc")() + , cn = "DRIVER={FreeTDS};SERVER=host;UID=user;PWD=password;DATABASE=dbname" + ; + +//Blocks until the connection is open +db.openSync(cn); + +//Blocks until the connection is closed +db.closeSync(); +``` + +#### .prepare(sql, callback) + +Prepare a statement for execution. + +* **sql** - SQL string to prepare +* **callback** - `callback (err, stmt)` + +Returns a `Statement` object via the callback + +```javascript +var db = require("odbc")() + , cn = "DRIVER={FreeTDS};SERVER=host;UID=user;PWD=password;DATABASE=dbname" + ; + +//Blocks until the connection is open +db.openSync(cn); + +db.prepare("insert into hits (col1, col2) VALUES (?, ?)", function (err, stmt) { + if (err) { + //could not prepare for some reason + console.log(err); + return db.closeSync(); + } + + //Bind and Execute the statment asynchronously + stmt.execute(['something', 42], function (err, result) { + result.closeSync(); + + //Close the connection + db.closeSync(); + }); +}) +``` + +#### .prepareSync(sql) + +Synchronously prepare a statement for execution. 
+ +* **sql** - SQL string to prepare + +Returns a `Statement` object -node-odbc - An asynchronous Node interface to unixodbc and its supported drivers +```javascript +var db = require("odbc")() + , cn = "DRIVER={FreeTDS};SERVER=host;UID=user;PWD=password;DATABASE=dbname" + ; -SYNOPSIS --------- +//Blocks until the connection is open +db.openSync(cn); - var sys = require("sys"); - var odbc = require("odbc"); +//Blocks while preparing the statement +var stmt = db.prepareSync("insert into hits (col1, col2) VALUES (?, ?)") - var db = new odbc.Database(); - db.open("DRIVER={FreeTDS};SERVER=host;UID=user;PWD=password;DATABASE=dbname", function(err) - { - db.query("select * from table", function(err, rows, moreResultSets) - { - sys.debug(sys.inspect(rows)); - db.close(function(){}); +//Bind and Execute the statment asynchronously +stmt.execute(['something', 42], function (err, result) { + result.closeSync(); + + //Close the connection + db.closeSync(); +}); +``` + +#### .beginTransaction(callback) + +Begin a transaction + +* **callback** - `callback (err)` + +#### .beginTransactionSync() + +Synchronously begin a transaction + +#### .commitTransaction(callback) + +Commit a transaction + +* **callback** - `callback (err)` + +```javascript +var db = require("odbc")() + , cn = "DRIVER={FreeTDS};SERVER=host;UID=user;PWD=password;DATABASE=dbname" + ; + +//Blocks until the connection is open +db.openSync(cn); + +db.beginTransaction(function (err) { + if (err) { + //could not begin a transaction for some reason. + console.log(err); + return db.closeSync(); + } + + var result = db.querySync("insert into customer (customerCode) values ('stevedave')"); + + db.commitTransaction(function (err) { + if (err) { + //error during commit + console.log(err); + return db.closeSync(); + } + + console.log(db.querySync("select * from customer where customerCode = 'stevedave'")); + + //Close the connection + db.closeSync(); + }); +}) +``` + +#### .commitTransactionSync() + +Synchronously commit a transaction + +```javascript +var db = require("odbc")() + , cn = "DRIVER={FreeTDS};SERVER=host;UID=user;PWD=password;DATABASE=dbname" + ; + +//Blocks until the connection is open +db.openSync(cn); + +db.beginTransactionSync(); + +var result = db.querySync("insert into customer (customerCode) values ('stevedave')"); + +db.commitTransactionSync(); + +console.log(db.querySync("select * from customer where customerCode = 'stevedave'")); + +//Close the connection +db.closeSync(); +``` + +#### .rollbackTransaction(callback) + +Rollback a transaction + +* **callback** - `callback (err)` + +```javascript +var db = require("odbc")() + , cn = "DRIVER={FreeTDS};SERVER=host;UID=user;PWD=password;DATABASE=dbname" + ; + +//Blocks until the connection is open +db.openSync(cn); + +db.beginTransaction(function (err) { + if (err) { + //could not begin a transaction for some reason. 
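+    //nothing was started, so there is no transaction to roll back; just log the error and close the connection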
+ console.log(err); + return db.closeSync(); + } + + var result = db.querySync("insert into customer (customerCode) values ('stevedave')"); + + db.rollbackTransaction(function (err) { + if (err) { + //error during rollback + console.log(err); + return db.closeSync(); + } + + console.log(db.querySync("select * from customer where customerCode = 'stevedave'")); + + //Close the connection + db.closeSync(); + }); +}) +``` + +#### .rollbackTransactionSync() + +Synchronously rollback a transaction + +```javascript +var db = require("odbc")() + , cn = "DRIVER={FreeTDS};SERVER=host;UID=user;PWD=password;DATABASE=dbname" + ; + +//Blocks until the connection is open +db.openSync(cn); + +db.beginTransactionSync(); + +var result = db.querySync("insert into customer (customerCode) values ('stevedave')"); + +db.rollbackTransactionSync(); + +console.log(db.querySync("select * from customer where customerCode = 'stevedave'")); + +//Close the connection +db.closeSync(); +``` + +---------- + +### Pool + +The node-odbc `Pool` is a rudimentary connection pool which will attempt to have +database connections ready and waiting for you when you call the `open` method. + +If you use a `Pool` instance, any connection that you close will cause another +connection to be opened for that same connection string. That connection will +be used the next time you call `Pool.open()` for the same connection string. + +This should probably be changed. + +#### .open(connectionString, callback) + +Get a Database` instance which is already connected to `connectionString` + +* **connectionString** - The ODBC connection string for your database +* **callback** - `callback (err, db)` + +```javascript +var Pool = require("odbc").Pool + , pool = new Pool() + , cn = "DRIVER={FreeTDS};SERVER=host;UID=user;PWD=password;DATABASE=dbname" + ; + +pool.open(cn, function (err, db) { + if (err) { + return console.log(err); + } + + //db is now an open database connection and can be used like normal + //if we run some queries with db.query(...) and then call db.close(); + //a connection to `cn` will be re-opened silently behind the scense + //and will be ready the next time we do `pool.open(cn)` +}); +``` + +#### .close(callback) + +Close all connections in the `Pool` instance + +* **callback** - `callback (err)` + +```javascript +var Pool = require("odbc").Pool + , pool = new Pool() + , cn = "DRIVER={FreeTDS};SERVER=host;UID=user;PWD=password;DATABASE=dbname" + ; + +pool.open(cn, function (err, db) { + if (err) { + return console.log(err); + } + + //db is now an open database connection and can be used like normal + //but all we will do now is close the whole pool + + pool.close(function () { + console.log("all connections in the pool are closed"); + }); +}); +``` + +example +------- + +```javascript +var odbc = require("odbc") + , util = require('util') + , db = new odbc.Database() + ; + +var connectionString = "DRIVER={FreeTDS};SERVER=host;UID=user;PWD=password;DATABASE=dbname"; + +db.open(connectionString, function(err) { + db.query("select * from table", function(err, rows, moreResultSets) { + console.log(util.inspect(rows, null, 10)); + + db.close(function() { + console.log("Database connection closed"); }); }); +}); +``` +testing +------- -DESCRIPTION ------------ +Tests can be run by executing `npm test` from within the root of the node-odbc +directory. You can also run the tests by executing `node run-tests.js` from +within the `/test` directory. -unixODBC binding to node. Needs a properly configured odbc(inst).ini. 
Tested locally using the FreeTDS and Postgres drivers. +By default, the tests are setup to run against a sqlite3 database which is +created at test time. This will require proper installation of the sqlite odbc +driver. On Ubuntu: `sudo apt-get install libsqliteodbc` +build options +------------- -INSTALLATION ------------- +### Debug -- Make sure you have the unixODBC binaries and unixODBC headers installed and the drivers configured. - - On ubuntu and probably most linux distros the unixODBC header files are in the unixodbc-dev package (apt-get install unixodbc-dev) - - On OSX one can use macports.org to install unixODBC (sudo port unixODBC) +If you would like to enable debugging messages to be displayed you can add the +flag `DEBUG` to the defines section of the `binding.gyp` file and then execute +`node-gyp rebuild`. -###git +```javascript + +'defines' : [ + "DEBUG" +], + +``` - git clone git://github.com/w1nk/node-odbc.git - cd node-odbc - node-waf configure build +### Dynodbc -###npm +You may also enable the ability to load a specific ODBC driver and bypass the +ODBC driver management layer. A performance increase of ~5Kqps was seen using +this method with the libsqlite3odbc driver. To do this, specify the `dynodbc` +flag in the defines section of the `binding.gyp` file. You will also need to +remove any library references in `binding.gyp`. Then execute `node-gyp +rebuild`. - npm install odbc +```javascript + +'defines' : [ + "dynodbc" +], +'conditions' : [ + [ 'OS == "linux"', { + 'libraries' : [ + //remove this: '-lodbc' + ], + +``` +### Unicode -TIPS ----- +By default, UNICODE suppport is enabled. This should provide the most accurate +way to get Unicode strings submitted to your database. For best results, you +may want to put your Unicode string into bound parameters. -- If you are using the FreeTDS ODBC driver and you have column names longer than 30 characters, you should add "TDS_Version=7.0" to your connection string to retrive the full column name. +However, if you experience issues or you think that submitting UTF8 strings will +work better or faster, you can remove the `UNICODE` define in `binding.gyp` -###Example +```javascript + +'defines' : [ + "UNICODE" +], + +``` - "DRIVER={FreeTDS};SERVER=host;UID=user;PWD=password;DATABASE=dbname;TDS_Version=7.0" +### timegm vs timelocal +When converting a database time to a C time one may use `timegm` or `timelocal`. See +`man timegm` for the details of these two functions. By default the node-odbc bindings +use `timelocal`. If you would prefer for it to use `timegm` then specify the `TIMEGM` +define in `binding.gyp` -BUGS ----- +```javascript + +'defines' : [ + "TIMEGM" +], + +``` + +### Strict Column Naming -None known, but there might be one ;). +When column names are retrieved from ODBC, you can request by SQL_DESC_NAME or +SQL_DESC_LABEL. SQL_DESC_NAME is the exact column name or none if there is none +defined. SQL_DESC_LABEL is the heading or column name or calculation. +SQL_DESC_LABEL is used by default and seems to work well in most cases. -COMPLETE --------- +If you want to use the exact column name via SQL_DESC_NAME, enable the `STRICT_COLUMN_NAMES` +define in `binding.gyp` -- Connection Management -- Querying -- Database Descriptions -- Binding Parameters (thanks to @gurzgri) +```javascript + +'defines' : [ + "STRICT_COLUMN_NAMES" +], + +``` -TODO +tips ---- +### Using node < v0.10 on Linux -- Option to emit on each record to avoid collecting the entire dataset first and increasing memory usage -- More error handling. 
-- Tests -- SQLGetData needs to support retrieving multiple chunks and concatenation in the case of large column values +Be aware that through node v0.9 the uv_queue_work function, which is used to +execute the ODBC functions on a separate thread, uses libeio for its thread +pool. This thread pool by default is limited to 4 threads. -ACKNOWLEDGEMENTS ----------------- +This means that if you have long running queries spread across multiple +instances of odbc.Database() or using odbc.Pool(), you will only be able to +have 4 concurrent queries. -- orlandov's node-sqlite binding was the framework I used to figure out using eio's thread pool to handle blocking calls since non blocking odbc doesn't seem to appear until 3.8. +You can increase the thread pool size by using @developmentseed's [node-eio] +(https://github.com/developmentseed/node-eio). -AUTHORS +#### install: +```bash +npm install eio +``` + +#### usage: +```javascript +var eio = require('eio'); +eio.setMinParallel(threadCount); +``` + +### Using the FreeTDS ODBC driver + +* If you have column names longer than 30 characters, you should add + "TDS_Version=7.0" to your connection string to retrive the full column name. + * Example : "DRIVER={FreeTDS};SERVER=host;UID=user;PWD=password;DATABASE=dbname;TDS_Version=7.0" +* If you got error "[unixODBC][FreeTDS][SQL Server]Unable to connect to data source" + Try use SERVERNAME instead of SERVER + * Example : "DRIVER={FreeTDS};SERVERNAME=host;UID=user;PWD=password;DATABASE=dbname" +* Be sure that your odbcinst.ini has the proper threading configuration for your + FreeTDS driver. If you choose the incorrect threading model it may cause + the thread pool to be blocked by long running queries. This is what + @wankdanker currently uses on Ubuntu 12.04: + +``` +[FreeTDS] +Description = TDS driver (Sybase/MS SQL) +Driver = libtdsodbc.so +Setup = libtdsS.so +CPTimeout = 120 +CPReuse = +Threading = 0 +``` + +contributors ------ +* Dan VerWeire (dverweire@gmail.com) +* Lee Smith (notwink@gmail.com) +* Bruno Bigras +* Christian Ensel +* Yorick +* Joachim Kainz +* Oleg Efimov +* paulhendrix + +license +------- + +Copyright (c) 2013 Dan VerWeire + +Copyright (c) 2010 Lee Smith + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: -Lee Smith (notwink@gmail.com) +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. -Dan VerWeire (dverweire@gmail.com) +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/binding.gyp b/binding.gyp index c05093db..52f55f6e 100644 --- a/binding.gyp +++ b/binding.gyp @@ -1,26 +1,61 @@ { - 'targets' : [ - { - 'target_name' : 'odbc_bindings', - 'sources' : [ - 'src/Database.cpp' - ], - 'libraries' : [ - '-lodbc' - ], - 'include_dirs' : [ - '/usr/local/lib', - '/opt/local/lib', - '/usr/include' - ], - 'conditions' : [ - [ 'OS == "linux"', { - - }], - [ 'OS=="win"', { - - }] - ] - } - ] + 'targets' : [ + { + 'target_name' : 'odbc_bindings', + 'sources' : [ + 'src/odbc.cpp', + 'src/odbc_connection.cpp', + 'src/odbc_statement.cpp', + 'src/odbc_result.cpp', + 'src/dynodbc.cpp' + ], + 'cflags' : ['-Wall', '-Wextra', '-Wno-unused-parameter'], + 'include_dirs': [ + " void): void; + openSync(connctionString: string | ConnctionInfo): void; + close(cb: (err: any) => void): void; + closeSync(): void; + createStatement(cb: (err: any, stmt: ODBCStatement) => void): void; + createStatementSync(): ODBCStatement; + query(sql: string, cb: (err: any, rows: ResultRow[], moreResultSets: any) => void): void; + query(sql: string, bindingParameters: any[], cb: (err: any, rows: ResultRow[], moreResultSets: any) => void): void; + querySync(sql: string, bindingParameters?: any[]): ResultRow[]; + beginTransaction(cb: (err: any) => void): void; + beginTransactionSync(): void; + endTransaction(rollback: boolean, cb: (err: any) => void): void; + endTransactionSync(rollback: boolean): void; + tables(catalog: string | null, schema: string | null, table: string | null, type: string | null, cb: (err: any, result: ODBCResult) => void): void; + columns(catalog: string | null, schema: string | null, table: string | null, column: string | null, cb: (err: any, result: ODBCResult) => void): void; + } + + export interface ResultRow { + [key: string]: any; + } + + export interface ODBCResult { + fetchMode: number; + fetchAll(cb: (err: any, data: ResultRow[]) => void): void; + fetchAllSync(): ResultRow[]; + fetch(cb: (err: any, data: ResultRow) => void): void; + fetchSync(): ResultRow; + closeSync(): void; + moreResultsSync(): any; + getColumnNamesSync(): string[]; + } + + export interface ODBCStatement { + queue: SimpleQueue; + execute(cb: (err: any, result: ODBCResult) => void): void; + execute(bindingParameters: any[], cb: (err: any, result: ODBCResult) => void): void; + executeSync(bindingParameters?: any[]): ODBCResult; + executeDirect(sql: string, cb: (err: any, result: ODBCResult) => void): void; + executeDirect(sql: string, bindingParameters: any[], cb: (err: any, result: ODBCResult) => void): void; + executeDirectSync(sql: string, bindingParameters?: any[]): ODBCResult; + executeNonQuery(cb: (err: any, result: number) => void): void; + executeNonQuery(bindingParameters: any[], cb: (err: any, result: number) => void): void; + executeNonQuerySync(bindingParameters?: any[]): number; + prepare(sql: string, cb: (err: any) => void): void; + prepareSync(sql: string): void; + bind(bindingParameters: any[], cb: (err: any) => void): void; + bindSync(bindingParameters: any[]): void; + closeSync(): void; + } + + export class Database { + constructor(options?: DatabaseOptions); + conn: ODBCConnection; + queue: SimpleQueue; + connected: boolean; + connectTimeout: number; + loginTimeout: number; + SQL_CLOSE: number; + SQL_DROP: number; + SQL_UNBIND: number; + SQL_RESET_PARAMS: number; + SQL_DESTROY: number; + FETCH_ARRAY: number; + FETCH_OBJECT: number; + open(connctionString: string | ConnctionInfo, cb: (err: any, result: any) => void): void; + openSync(connctionString: string | ConnctionInfo): 
void; + close(cb: (err: any) => void): void; + closeSync(): void; + query(sql: string, cb: (err: any, rows: ResultRow[], moreResultSets: any) => void): void; + query(sql: string, bindingParameters: any[], cb: (err: any, rows: ResultRow[], moreResultSets: any) => void): void; + querySync(sql: string, bindingParameters?: any[]): ResultRow[]; + queryResult(sql: string, cb: (err: any, result: ODBCResult) => void): void; + queryResult(sql: string, bindingParameters: any[], cb: (err: any, result: ODBCResult) => void): void; + queryResultSync(sql: string, bindingParameters?: any[]): ODBCResult; + prepare(sql: string, cb: (err: any, statement: ODBCStatement) => void): void; + prepareSync(sql: string): ODBCStatement; + beginTransaction(cb: (err: any) => void): void; + beginTransactionSync(): void; + endTransaction(rollback: boolean, cb: (err: any) => void): void; + endTransactionSync(rollback: boolean): void; + commitTransaction(cb: (err: any) => void): void; + commitTransactionSync(): void; + rollbackTransaction(cb: (err: any) => void): void; + rollbackTransactionSync(): void; + tables(catalog: string | null, schema: string | null, table: string | null, type: string | null, cb: (err: any, result: ODBCTable[]) => void): void; + columns(catalog: string | null, schema: string | null, table: string | null, column: string | null, cb: (err: any, result: ODBCColumn[]) => void): void; + describe(options: DescribeOptions, cb: (err: any, result: (ODBCTable & ODBCColumn)[]) => void): void; + } + + export class Pool { + constructor(options?: DatabaseOptions); + open(connctionString: string, cb: (err: any, db: Database) => void): void; + close(cb: (err: any) => void): void; + } + + export function open(connctionString: string | ConnctionInfo, cb: (err: any, result: any) => void): void; +} + +export = odbc; diff --git a/lib/odbc.js b/lib/odbc.js new file mode 100644 index 00000000..23a2de42 --- /dev/null +++ b/lib/odbc.js @@ -0,0 +1,820 @@ +/* + Copyright (c) 2013, Dan VerWeire + Copyright (c) 2010, Lee Smith + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ + +var odbc = require("bindings")("odbc_bindings") + , SimpleQueue = require("./simple-queue") + , util = require("util") + ; + +module.exports = function (options) { + return new Database(options); +} + +module.exports.debug = false; + +module.exports.Database = Database; +module.exports.ODBC = odbc.ODBC; +module.exports.ODBCConnection = odbc.ODBCConnection; +module.exports.ODBCStatement = odbc.ODBCStatement; +module.exports.ODBCResult = odbc.ODBCResult; +module.exports.loadODBCLibrary = odbc.loadODBCLibrary; + +module.exports.open = function (connectionString, options, cb) { + var db; + + if (typeof options === 'function') { + cb = options; + options = null; + } + + db = new Database(options); + + db.open(connectionString, function (err) { + cb(err, db); + }); +} + +function Database(options) { + var self = this; + + options = options || {}; + + if (odbc.loadODBCLibrary) { + if (!options.library && !module.exports.library) { + throw new Error("You must specify a library when complied with dynodbc, " + + "otherwise this jams will segfault."); + } + + if (!odbc.loadODBCLibrary(options.library || module.exports.library)) { + throw new Error("Could not load library. You may need to specify full " + + "path."); + } + } + + self.odbc = (options.odbc) ? options.odbc : new odbc.ODBC(); + self.odbc.domain = process.domain; + self.queue = new SimpleQueue(); + self.fetchMode = options.fetchMode || null; + self.connected = false; + self.connectTimeout = (options.hasOwnProperty('connectTimeout')) + ? options.connectTimeout + : null + ; + self.loginTimeout = (options.hasOwnProperty('loginTimeout')) + ? options.loginTimeout + : null + ; +} + +//Expose constants +Object.keys(odbc.ODBC).forEach(function (key) { + if (typeof odbc.ODBC[key] !== "function") { + //On the database prototype + Database.prototype[key] = odbc.ODBC[key]; + + //On the exports + module.exports[key] = odbc.ODBC[key]; + } +}); + +Database.prototype.open = function (connectionString, cb) { + var self = this; + + if (typeof(connectionString) == "object") { + var obj = connectionString; + connectionString = ""; + + Object.keys(obj).forEach(function (key) { + connectionString += key + "=" + obj[key] + ";"; + }); + } + + self.odbc.createConnection(function (err, conn) { + if (err) return cb(err); + + self.conn = conn; + self.conn.domain = process.domain; + + if (self.connectTimeout || self.connectTimeout === 0) { + self.conn.connectTimeout = self.connectTimeout; + } + + if (self.loginTimeout || self.loginTimeout === 0) { + self.conn.loginTimeout = self.loginTimeout; + } + + self.conn.open(connectionString, function (err, result) { + if (err) return cb(err); + + self.connected = true; + + return cb(err, result); + }); + }); +}; + +Database.prototype.openSync = function (connectionString) { + var self = this; + + self.conn = self.odbc.createConnectionSync(); + + if (self.connectTimeout || self.connectTimeout === 0) { + self.conn.connectTimeout = self.connectTimeout; + } + + if (self.loginTimeout || self.loginTimeout === 0) { + self.conn.loginTimeout = self.loginTimeout; + } + + if (typeof(connectionString) == "object") { + var obj = connectionString; + connectionString = ""; + + Object.keys(obj).forEach(function (key) { + connectionString += key + "=" + obj[key] + ";"; + }); + } + + var result = self.conn.openSync(connectionString); + + if (result) { + self.connected = true; + } + + return result; +} + +Database.prototype.close = function (cb) { + var self = this; + + self.queue.push(function (next) { + //check to see if conn still 
exists (it's deleted when closed) + if (!self.conn) { + if (cb) cb(null); + return next(); + } + + self.conn.close(function (err) { + self.connected = false; + delete self.conn; + + if (cb) cb(err); + return next(); + }); + }); +}; + +Database.prototype.closeSync = function () { + var self = this; + + var result = self.conn.closeSync(); + + self.connected = false; + delete self.conn; + + return result +} + +Database.prototype.query = function (sql, params, cb) { + var self = this; + + if (typeof(params) == 'function') { + cb = params; + params = null; + } + + if (!self.connected) { + return cb({ message : "Connection not open."}, [], false); + } + + self.queue.push(function (next) { + function cbQuery (initialErr, result) { + fetchMore(); + + function fetchMore() { + if (self.fetchMode) { + result.fetchMode = self.fetchMode; + } + + result.fetchAll(function (err, data) { + var moreResults, moreResultsError = null; + + try { + moreResults = result.moreResultsSync(); + } + catch (e) { + moreResultsError = e; + //force to check for more results + moreResults = true; + } + + //close the result before calling back + //if there are not more result sets + if (!moreResults) { + result.closeSync(); + } + + cb(err || initialErr, data, moreResults); + initialErr = null; + + while (moreResultsError) { + try { + moreResults = result.moreResultsSync(); + cb(moreResultsError, [], moreResults); // No errors left - still need to report the + // last one, though + moreResultsError = null; + } catch (e) { + cb(moreResultsError, [], moreResults); + moreResultsError = e; + } + } + + if (moreResults) { + return fetchMore(); + } + else { + return next(); + } + }); + } + } + + if (params) { + self.conn.query(sql, params, cbQuery); + } + else { + self.conn.query(sql, cbQuery); + } + }); +}; + +Database.prototype.queryResult = function (sql, params, cb) { + var self = this; + + if (typeof(params) == 'function') { + cb = params; + params = null; + } + + if (!self.connected) { + return cb({ message : "Connection not open."}, null); + } + + self.queue.push(function (next) { + //ODBCConnection.query() is the fastest-path querying mechanism. 
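+    //Unlike query(), queryResult() does not fetch any rows itself; it hands the raw
+    //ODBCResult back to the caller, who is responsible for fetching from it and closing it.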
+ if (params) { + self.conn.query(sql, params, cbQuery); + } + else { + self.conn.query(sql, cbQuery); + } + + function cbQuery (err, result) { + if (err) { + cb(err, null); + + return next(); + } + + if (self.fetchMode) { + result.fetchMode = self.fetchMode; + } + + cb(err, result); + + return next(); + } + }); +}; + +Database.prototype.queryResultSync = function (sql, params) { + var self = this, result; + + if (!self.connected) { + throw ({ message : "Connection not open."}); + } + + if (params) { + result = self.conn.querySync(sql, params); + } + else { + result = self.conn.querySync(sql); + } + + if (self.fetchMode) { + result.fetchMode = self.fetchMode; + } + + return result; +}; + +Database.prototype.querySync = function (sql, params) { + var self = this, result; + + if (!self.connected) { + throw ({ message : "Connection not open."}); + } + + if (params) { + result = self.conn.querySync(sql, params); + } + else { + result = self.conn.querySync(sql); + } + + if (self.fetchMode) { + result.fetchMode = self.fetchMode; + } + + var data = result.fetchAllSync(); + + result.closeSync(); + + return data; +}; + +Database.prototype.beginTransaction = function (cb) { + var self = this; + + self.conn.beginTransaction(cb); + + return self; +}; + +Database.prototype.endTransaction = function (rollback, cb) { + var self = this; + + self.conn.endTransaction(rollback, cb); + + return self; +}; + +Database.prototype.commitTransaction = function (cb) { + var self = this; + + self.conn.endTransaction(false, cb); //don't rollback + + return self; +}; + +Database.prototype.rollbackTransaction = function (cb) { + var self = this; + + self.conn.endTransaction(true, cb); //rollback + + return self; +}; + +Database.prototype.beginTransactionSync = function () { + var self = this; + + self.conn.beginTransactionSync(); + + return self; +}; + +Database.prototype.endTransactionSync = function (rollback) { + var self = this; + + self.conn.endTransactionSync(rollback); + + return self; +}; + +Database.prototype.commitTransactionSync = function () { + var self = this; + + self.conn.endTransactionSync(false); //don't rollback + + return self; +}; + +Database.prototype.rollbackTransactionSync = function () { + var self = this; + + self.conn.endTransactionSync(true); //rollback + + return self; +}; + +Database.prototype.columns = function(catalog, schema, table, column, callback) { + var self = this; + if (!self.queue) self.queue = []; + + callback = callback || arguments[arguments.length - 1]; + + self.queue.push(function (next) { + self.conn.columns(catalog, schema, table, column, function (err, result) { + if (err) return callback(err, [], false); + + result.fetchAll(function (err, data) { + result.closeSync(); + + callback(err, data); + + return next(); + }); + }); + }); +}; + +Database.prototype.tables = function(catalog, schema, table, type, callback) { + var self = this; + if (!self.queue) self.queue = []; + + callback = callback || arguments[arguments.length - 1]; + + self.queue.push(function (next) { + self.conn.tables(catalog, schema, table, type, function (err, result) { + if (err) return callback(err, [], false); + + result.fetchAll(function (err, data) { + result.closeSync(); + + callback(err, data); + + return next(); + }); + }); + }); +}; + +Database.prototype.describe = function(obj, callback) { + var self = this; + + if (typeof(callback) != "function") { + throw({ + error : "[node-odbc] Missing Arguments", + message : "You must specify a callback function in order for the describe method to work." 
+ }); + + return false; + } + + if (typeof(obj) != "object") { + callback({ + error : "[node-odbc] Missing Arguments", + message : "You must pass an object as argument 0 if you want anything productive to happen in the describe method." + }, []); + + return false; + } + + if (!obj.database) { + callback({ + error : "[node-odbc] Missing Arguments", + message : "The object you passed did not contain a database property. This is required for the describe method to work." + }, []); + + return false; + } + + //set some defaults if they weren't passed + obj.schema = obj.schema || "%"; + obj.type = obj.type || "table"; + + if (obj.table && obj.column) { + //get the column details + self.columns(obj.database, obj.schema, obj.table, obj.column, callback); + } + else if (obj.table) { + //get the columns in the table + self.columns(obj.database, obj.schema, obj.table, "%", callback); + } + else { + //get the tables in the database + self.tables(obj.database, obj.schema, null, obj.type || "table", callback); + } +}; + +Database.prototype.prepare = function (sql, cb) { + var self = this; + + self.conn.createStatement(function (err, stmt) { + if (err) return cb(err); + + stmt.queue = new SimpleQueue(); + + stmt.prepare(sql, function (err) { + if (err) return cb(err); + + return cb(null, stmt); + }); + }); +} + +Database.prototype.prepareSync = function (sql, cb) { + var self = this; + + var stmt = self.conn.createStatementSync(); + + stmt.queue = new SimpleQueue(); + + stmt.prepareSync(sql); + + return stmt; +} + +//Proxy all of the asynchronous functions so that they are queued +odbc.ODBCStatement.prototype._execute = odbc.ODBCStatement.prototype.execute; +odbc.ODBCStatement.prototype._executeDirect = odbc.ODBCStatement.prototype.executeDirect; +odbc.ODBCStatement.prototype._executeNonQuery = odbc.ODBCStatement.prototype.executeNonQuery; +odbc.ODBCStatement.prototype._prepare = odbc.ODBCStatement.prototype.prepare; +odbc.ODBCStatement.prototype._bind = odbc.ODBCStatement.prototype.bind; + +odbc.ODBCStatement.prototype.execute = function (params, cb) { + var self = this; + + self.queue = self.queue || new SimpleQueue(); + + if (!cb) { + cb = params; + params = null; + } + + self.queue.push(function (next) { + //If params were passed to this function, then bind them and + //then execute. + if (params) { + self._bind(params, function (err) { + if (err) { + return cb(err); + } + + self._execute(function (err, result) { + cb(err, result); + + return next(); + }); + }); + } + //Otherwise execute and pop the next bind call + else { + self._execute(function (err, result) { + cb(err, result); + + //NOTE: We only execute the next queued bind call after + // we have called execute() or executeNonQuery(). This ensures + // that we don't call a bind() a bunch of times without ever + // actually executing that bind. Not + self.bindQueue && self.bindQueue.next(); + + return next(); + }); + } + }); +}; + +odbc.ODBCStatement.prototype.executeDirect = function (sql, cb) { + var self = this; + + self.queue = self.queue || new SimpleQueue(); + + self.queue.push(function (next) { + self._executeDirect(sql, function (err, result) { + cb(err, result); + + return next(); + }); + }); +}; + +odbc.ODBCStatement.prototype.executeNonQuery = function (params, cb) { + var self = this; + + self.queue = self.queue || new SimpleQueue(); + + if (!cb) { + cb = params; + params = null; + } + + self.queue.push(function (next) { + //If params were passed to this function, then bind them and + //then executeNonQuery. 
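+    //(per the type definitions, executeNonQuery reports the number of affected rows rather than an ODBCResult)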
+ if (params) { + self._bind(params, function (err) { + if (err) { + return cb(err); + } + + self._executeNonQuery(function (err, result) { + cb(err, result); + + return next(); + }); + }); + } + //Otherwise executeNonQuery and pop the next bind call + else { + self._executeNonQuery(function (err, result) { + cb(err, result); + + //NOTE: We only execute the next queued bind call after + // we have called execute() or executeNonQuery(). This ensures + // that we don't call a bind() a bunch of times without ever + // actually executing that bind. Not + self.bindQueue && self.bindQueue.next(); + + return next(); + }); + } + }); +}; + +odbc.ODBCStatement.prototype.prepare = function (sql, cb) { + var self = this; + + self.queue = self.queue || new SimpleQueue(); + + self.queue.push(function (next) { + self._prepare(sql, function (err) { + cb(err); + + return next(); + }); + }); +}; + +odbc.ODBCStatement.prototype.bind = function (ary, cb) { + var self = this; + + self.bindQueue = self.bindQueue || new SimpleQueue(); + + self.bindQueue.push(function () { + self._bind(ary, function (err) { + cb(err); + + //NOTE: we do not call next() here because + //we want to pop the next bind call only + //after the next execute call + }); + }); +}; + + +//proxy the ODBCResult fetch function so that it is queued +odbc.ODBCResult.prototype._fetch = odbc.ODBCResult.prototype.fetch; + +odbc.ODBCResult.prototype.fetch = function (cb) { + var self = this; + + self.queue = self.queue || new SimpleQueue(); + + self.queue.push(function (next) { + self._fetch(function (err, data) { + if (cb) cb(err, data); + + return next(); + }); + }); +}; + +module.exports.Pool = Pool; + +Pool.count = 0; + +function Pool (options) { + var self = this; + self.index = Pool.count++; + self.availablePool = {}; + self.usedPool = {}; + self.odbc = new odbc.ODBC(); + self.options = options || {} + self.options.odbc = self.odbc; +} + +Pool.prototype.open = function (connectionString, callback) { + var self = this + , db + ; + + //check to see if we already have a connection for this connection string + if (self.availablePool[connectionString] && self.availablePool[connectionString].length) { + db = self.availablePool[connectionString].shift() + self.usedPool[connectionString].push(db) + + callback(null, db); + } + else { + db = new Database(self.options); + db.realClose = db.close; + + db.close = function (cb) { + //call back early, we can do the rest of this stuff after the client thinks + //that the connection is closed. + cb(null); + + + //close the connection for real + //this will kill any temp tables or anything that might be a security issue. 
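+      //note: this eager re-open is what keeps a warm connection waiting in availablePool
+      //for the next pool.open() call made with the same connection string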
+ db.realClose(function () { + //remove this db from the usedPool + self.usedPool[connectionString].splice(self.usedPool[connectionString].indexOf(db), 1); + + //re-open the connection using the connection string + db.open(connectionString, function (error) { + if (error) { + console.error(error); + return; + } + + //add this clean connection to the connection pool + self.availablePool[connectionString] = self.availablePool[connectionString] || []; + self.availablePool[connectionString].push(db); + exports.debug && console.dir(self); + }); + }); + }; + + db.open(connectionString, function (error) { + exports.debug && console.log("odbc.js : pool[%s] : pool.db.open callback()", self.index); + + self.usedPool[connectionString] = self.usedPool[connectionString] || []; + self.usedPool[connectionString].push(db); + + callback(error, db); + }); + } +}; + +Pool.prototype.close = function (callback) { + var self = this + , required = 0 + , received = 0 + , connections + , key + , x + ; + + exports.debug && console.log("odbc.js : pool[%s] : pool.close()", self.index); + //we set a timeout because a previous db.close() may + //have caused the a behind the scenes db.open() to prepare + //a new connection + setTimeout(function () { + //merge the available pool and the usedPool + var pools = {}; + + for (key in self.availablePool) { + pools[key] = (pools[key] || []).concat(self.availablePool[key]); + } + + for (key in self.usedPool) { + pools[key] = (pools[key] || []).concat(self.usedPool[key]); + } + + exports.debug && console.log("odbc.js : pool[%s] : pool.close() - setTimeout() callback", self.index); + exports.debug && console.dir(pools); + + if (Object.keys(pools).length == 0) { + return callback(); + } + + for (key in pools) { + connections = pools[key]; + required += connections.length; + + exports.debug && console.log("odbc.js : pool[%s] : pool.close() - processing pools %s - connections: %s", self.index, key, connections.length); + + for (x = 0 ; x < connections.length; x ++) { + (function (x) { + //call the realClose method to avoid + //automatically re-opening the connection + exports.debug && console.log("odbc.js : pool[%s] : pool.close() - calling realClose() for connection #%s", self.index, x); + + connections[x].realClose(function () { + exports.debug && console.log("odbc.js : pool[%s] : pool.close() - realClose() callback for connection #%s", self.index, x); + received += 1; + + if (received === required) { + callback(); + + //prevent mem leaks + self = null; + connections = null; + required = null; + received = null; + key = null; + + return; + } + }); + })(x); + } + } + }, 2000); +}; diff --git a/lib/simple-queue.js b/lib/simple-queue.js new file mode 100644 index 00000000..a6f784e1 --- /dev/null +++ b/lib/simple-queue.js @@ -0,0 +1,40 @@ +module.exports = SimpleQueue; + +function SimpleQueue() { + var self = this; + + self.fifo = []; + self.executing = false; +} + +SimpleQueue.prototype.push = function (fn) { + var self = this; + + self.fifo.push(fn); + + self.maybeNext(); +}; + +SimpleQueue.prototype.maybeNext = function () { + var self = this; + + if (!self.executing) { + self.next(); + } +}; + +SimpleQueue.prototype.next = function () { + var self = this; + + if (self.fifo.length) { + var fn = self.fifo.shift(); + + self.executing = true; + + fn(function () { + self.executing = false; + + self.maybeNext(); + }); + } +}; \ No newline at end of file diff --git a/odbc.js b/odbc.js deleted file mode 100644 index 7c34f795..00000000 --- a/odbc.js +++ /dev/null @@ -1,270 +0,0 @@ -/* - 
Copyright (c) 2010, Lee Smith - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ - -var odbc; - -try { - odbc = require("./odbc_bindings"); -} catch (e) { - try { - odbc = require("./build/default/odbc_bindings"); - } catch (e) { - odbc = require("./build/Release/odbc_bindings"); - } -} - -var Database = exports.Database = function () { - var self = this; - var db = new odbc.Database(); - db.executing = false; - db.connected = false; - db.queue = []; - - db.__proto__ = Database.prototype; - - return db; -}; - -Database.prototype = { - __proto__: odbc.Database.prototype, - constructor: Database, -}; - -Database.prototype.processQueue = function () { - var self = this; - - if (!self.queue) self.queue = []; - - if (self.connected && !self.executing && self.queue.length) { - var currentQuery = self.queue[0]; - self.executing = true; - - currentQuery.method.apply(currentQuery.context, currentQuery.args); //TODO: we need to make sure we aren't sending any extra arguments to the cpp method - } -}; - -Database.prototype.query = function(sql, params, callback) { - var self = this, args = []; - - if (callback == null) { - callback = params; // no parameters supplied - params = null; - } - - if (!self.connected) { - return callback( { message : "Connection not open." }, [], false ); - } - - if (!self.queue) self.queue = []; - - args.push(sql); - - if (params) { - args.push(params); - } - - args.push(function (error, rows, morefollowing) { - //check to see if this is the last result set returned - if (!morefollowing) { - self.queue.shift(); - self.executing = false; - } - - if (callback) callback.apply(self, arguments); - - self.processQueue(); - }); - - self.queue.push({ - context : self, - method : self.dispatchQuery, - args : args - }); - - self.processQueue(); -}; - -Database.prototype.open = function(connectionString, callback) { - var self = this; - - if (self.connected) { - return callback( { message : "Connection already open." }, [], false); - } - - self.dispatchOpen(connectionString, function (err) { - self.connected = true; - self.processQueue(); - - return callback(err); - }); -}; - -/** - * - * We must queue the close. 
If we don't then we may close during the middle of a query which - * could cause a segfault or other madness - * - **/ - -Database.prototype.close = function(callback) { - var self = this; - - if (!self.queue) self.queue = []; - - self.queue.push({ - context : self, - method : self.dispatchClose, - args : [function (err) { - self.queue = []; - self.connected = false; - self.executing = false; - - if (err && !callback) throw err; - else if (callback) callback(err) - }] - }); - - self.processQueue(); -}; - -Database.prototype.tables = function(catalog, schema, table, type, callback) { - var self = this; - if (!self.queue) self.queue = []; - - self.queue.push({ - context : self, - method : self.dispatchTables, - catalog : (arguments.length > 1) ? catalog : "", - schema : (arguments.length > 2) ? schema : "", - table : (arguments.length > 3) ? table : "", - type : (arguments.length > 4) ? type : "", - callback : (arguments.length == 5) ? callback : arguments[arguments.length - 1], - args : arguments - }); - - self.processQueue(); -}; - -Database.prototype.columns = function(catalog, schema, table, column, callback) { - var self = this; - if (!self.queue) self.queue = []; - - self.queue.push({ - context : self, - method : self.dispatchColumns, - catalog : (arguments.length > 1) ? catalog : "", - schema : (arguments.length > 2) ? schema : "", - table : (arguments.length > 3) ? table : "", - column : (arguments.length > 4) ? column : "", - callback : (arguments.length == 5) ? callback : arguments[arguments.length - 1], - args : arguments - }); - - self.processQueue(); -}; - -Database.prototype.describe = function(obj, callback) { - var self = this; - - if (typeof(callback) != "function") { - throw({ - error : "[node-odbc] Missing Arguments", - message : "You must specify a callback function in order for the describe method to work." - }); - - return false; - } - - if (typeof(obj) != "object") { - callback({ - error : "[node-odbc] Missing Arguments", - message : "You must pass an object as argument 0 if you want anything productive to happen in the describe method." - }, []); - - return false; - } - - if (!obj.database) { - callback({ - error : "[node-odbc] Missing Arguments", - message : "The object you passed did not contain a database property. This is required for the describe method to work." - }, []); - - return false; - } - - //set some defaults if they weren't passed - obj.schema = obj.schema || "%"; - obj.type = obj.type || "table"; - - if (obj.table && obj.column) { - //get the column details - self.columns(obj.database, obj.schema, obj.table, obj.column, callback); - } - else if (obj.table) { - //get the columns in the table - self.columns(obj.database, obj.schema, obj.table, "%", callback); - } - else { - //get the tables in the database - self.tables(obj.database, obj.schema, null, obj.type || "table", callback); - } -}; - - -var Pool = exports.Pool = function () { - var self = this; - - self.connectionPool = {}; -} - -Pool.prototype.open = function (connectionString, callback) { - var self = this; - - //check to see if we already have a connection for this connection string - if (self.connectionPool[connectionString] && self.connectionPool[connectionString].length) { - callback(null, self.connectionPool[connectionString].shift()); - } - else { - var db = new Database(); - db.realClose = db.close; - - db.close = function (cb) { - //call back early, we can do the rest of this stuff after the client thinks - //that the connection is closed. 
- cb(null); - - - //close the connection for real - //this will kill any temp tables or anything that might be a security issue. - db.realClose( function () { - - //re-open the connection using the connection string - db.open(connectionString, function (error) { - - //add this clean connection to the connection pool - self.connectionPool[connectionString] = self.connectionPool[connectionString] || []; - self.connectionPool[connectionString].push(db); - - }); - }); - }; - - db.open(connectionString, function (error) { - callback(error, db); - }); - } -}; diff --git a/odbc_bindings.node b/odbc_bindings.node deleted file mode 120000 index 01a8ba84..00000000 --- a/odbc_bindings.node +++ /dev/null @@ -1 +0,0 @@ -build/Release/odbc_bindings.node \ No newline at end of file diff --git a/package-lock.json b/package-lock.json new file mode 100644 index 00000000..17417d41 --- /dev/null +++ b/package-lock.json @@ -0,0 +1,18 @@ +{ + "name": "odbc", + "version": "1.4.5", + "lockfileVersion": 1, + "requires": true, + "dependencies": { + "bindings": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/bindings/-/bindings-1.3.0.tgz", + "integrity": "sha1-s0b27PapX1qBXFg5/HzbIlAvHtc=" + }, + "nan": { + "version": "2.10.0", + "resolved": "https://npm.paviliongift.com/nan/-/nan-2.10.0.tgz", + "integrity": "sha1-ltDNYQ69WNS03pzAxoKM2pnHVI8=" + } + } +} diff --git a/package.json b/package.json index 04ffba6c..98f8a0b3 100644 --- a/package.json +++ b/package.json @@ -1,20 +1,40 @@ { "name": "odbc", "description": "unixodbc bindings for node", - "version": "0.3.1", - "homepage": "http://github.com/w1nk/node-odbc/", + "version": "1.4.6", + "main": "lib/odbc.js", + "types": "./lib/odbc.d.ts", + "homepage": "http://github.com/wankdanker/node-odbc/", "repository": { "type": "git", - "url": "git://github.com/w1nk/node-odbc.git" + "url": "git://github.com/wankdanker/node-odbc.git" }, - "author": "Lee Smith ", + "bugs": { + "url": "https://github.com/w1nk/node-odbc/issues" + }, + "contributors": [ + { + "name": "Dan VerWeire", + "email": "dverweire@gmail.com" + }, + { + "name": "Lee Smith", + "email": "notwink@gmail.com" + } + ], "directories": { "lib": "." }, "engines": { - "node": "*" + "node": ">=0.8.0" + }, + "scripts": { + "install": "node-gyp configure build", + "test": "cd test && node run-tests.js" + }, + "dependencies": { + "bindings": "^1.3.0", + "nan": "^2.10.0" }, - "scripts": { - "preinstall":"node-waf configure build" - } + "gypfile": true } diff --git a/src/Database.cpp b/src/Database.cpp deleted file mode 100644 index 757d3697..00000000 --- a/src/Database.cpp +++ /dev/null @@ -1,861 +0,0 @@ -/* - Copyright (c) 2010, Lee Smith - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/ - -#include -#include -#include -#include - -#include "Database.h" - -#define MAX_FIELD_SIZE 1024 -#define MAX_VALUE_SIZE 1048576 - -using namespace v8; -using namespace node; - -typedef struct { - unsigned char *name; - unsigned int len; - SQLLEN type; -} Column; - -pthread_mutex_t Database::m_odbcMutex; - -void Database::Init(v8::Handle target) { - HandleScope scope; - - Local t = FunctionTemplate::New(New); - - constructor_template = Persistent::New(t); - constructor_template->InstanceTemplate()->SetInternalFieldCount(1); - constructor_template->SetClassName(String::NewSymbol("Database")); - - NODE_SET_PROTOTYPE_METHOD(constructor_template, "dispatchOpen", Open); - NODE_SET_PROTOTYPE_METHOD(constructor_template, "dispatchClose", Close); - NODE_SET_PROTOTYPE_METHOD(constructor_template, "dispatchQuery", Query); - NODE_SET_PROTOTYPE_METHOD(constructor_template, "dispatchTables", Tables); - NODE_SET_PROTOTYPE_METHOD(constructor_template, "dispatchColumns", Columns); - - target->Set(v8::String::NewSymbol("Database"), constructor_template->GetFunction()); - scope.Close(Undefined()); - pthread_mutex_init(&Database::m_odbcMutex, NULL); -} - -Handle Database::New(const Arguments& args) { - HandleScope scope; - Database* dbo = new Database(); - dbo->Wrap(args.This()); - scope.Close(Undefined()); - return args.This(); -} - -int Database::EIO_AfterOpen(eio_req *req) { - ev_unref(EV_DEFAULT_UC); - HandleScope scope; - struct open_request *open_req = (struct open_request *)(req->data); - - Local argv[1]; - bool err = false; - if (req->result) { - err = true; - argv[0] = Exception::Error(String::New("Error opening database")); - } - - TryCatch try_catch; - - open_req->dbo->Unref(); - open_req->cb->Call(Context::GetCurrent()->Global(), err ? 1 : 0, argv); - - if (try_catch.HasCaught()) { - FatalException(try_catch); - } - - open_req->cb.Dispose(); - - free(open_req); - scope.Close(Undefined()); - return 0; -} - -void Database::EIO_Open(eio_req *req) { - struct open_request *open_req = (struct open_request *)(req->data); - Database *self = open_req->dbo->self(); - pthread_mutex_lock(&Database::m_odbcMutex); - int ret = SQLAllocEnv( &self->m_hEnv ); - if( ret == SQL_SUCCESS ) { - ret = SQLAllocConnect( self->m_hEnv,&self->m_hDBC ); - if( ret == SQL_SUCCESS ) { - SQLSetConnectOption( self->m_hDBC,SQL_LOGIN_TIMEOUT,5 ); - char connstr[1024]; - ret = SQLDriverConnect(self->m_hDBC,NULL,(SQLCHAR*)open_req->connection,strlen(open_req->connection),(SQLCHAR*)connstr,1024,NULL,SQL_DRIVER_NOPROMPT); - - if( ret == SQL_SUCCESS || ret == SQL_SUCCESS_WITH_INFO ) - { - ret = SQLAllocStmt( self->m_hDBC,&self->m_hStmt ); - if (ret != SQL_SUCCESS) printf("not connected\n"); - - if ( !SQL_SUCCEEDED( SQLGetFunctions(self->m_hDBC, SQL_API_SQLMORERESULTS, &self->canHaveMoreResults))) - { - self->canHaveMoreResults = 0; - } - } - else - { - self->printError("SQLDriverConnect", self->m_hDBC, SQL_HANDLE_DBC); - } - } - } - pthread_mutex_unlock(&Database::m_odbcMutex); - req->result = ret; -} - -Handle Database::Open(const Arguments& args) { - HandleScope scope; - - REQ_STR_ARG(0, connection); - REQ_FUN_ARG(1, cb); - - Database* dbo = ObjectWrap::Unwrap(args.This()); - - struct open_request *open_req = (struct open_request *) - calloc(1, sizeof(struct open_request) + connection.length()); - - if (!open_req) { - V8::LowMemoryNotification(); - return ThrowException(Exception::Error(String::New("Could not allocate enough memory"))); - } - - strcpy(open_req->connection, *connection); - open_req->cb = Persistent::New(cb); - 
open_req->dbo = dbo; - - eio_custom(EIO_Open, EIO_PRI_DEFAULT, EIO_AfterOpen, open_req); - - ev_ref(EV_DEFAULT_UC); - dbo->Ref(); - scope.Close(Undefined()); - return Undefined(); -} - -int Database::EIO_AfterClose(eio_req *req) { - ev_unref(EV_DEFAULT_UC); - - HandleScope scope; - - struct close_request *close_req = (struct close_request *)(req->data); - - Local argv[1]; - bool err = false; - if (req->result) { - err = true; - argv[0] = Exception::Error(String::New("Error closing database")); - } - - TryCatch try_catch; - - close_req->dbo->Unref(); - close_req->cb->Call(Context::GetCurrent()->Global(), err ? 1 : 0, argv); - - if (try_catch.HasCaught()) { - FatalException(try_catch); - } - - close_req->cb.Dispose(); - - free(close_req); - scope.Close(Undefined()); - return 0; -} - -void Database::EIO_Close(eio_req *req) { - struct close_request *close_req = (struct close_request *)(req->data); - Database* dbo = close_req->dbo; - pthread_mutex_lock(&Database::m_odbcMutex); - SQLDisconnect(dbo->m_hDBC); - SQLFreeHandle(SQL_HANDLE_ENV, dbo->m_hEnv); - SQLFreeHandle(SQL_HANDLE_DBC, dbo->m_hDBC); - pthread_mutex_unlock(&Database::m_odbcMutex); -} - -Handle Database::Close(const Arguments& args) { - HandleScope scope; - - REQ_FUN_ARG(0, cb); - - Database* dbo = ObjectWrap::Unwrap(args.This()); - - struct close_request *close_req = (struct close_request *) - calloc(1, sizeof(struct close_request)); - - if (!close_req) { - V8::LowMemoryNotification(); - return ThrowException(Exception::Error(String::New("Could not allocate enough memory"))); - } - - close_req->cb = Persistent::New(cb); - close_req->dbo = dbo; - - eio_custom(EIO_Close, EIO_PRI_DEFAULT, EIO_AfterClose, close_req); - - ev_ref(EV_DEFAULT_UC); - dbo->Ref(); - scope.Close(Undefined()); - return Undefined(); -} - -int Database::EIO_AfterQuery(eio_req *req) { - ev_unref(EV_DEFAULT_UC); - - struct query_request *prep_req = (struct query_request *)(req->data); - struct tm timeInfo = { 0 }; //used for processing date/time datatypes - - HandleScope scope; - - Database *self = prep_req->dbo->self(); //an easy reference to the Database object - Local objError = Object::New(); //our error object which we will use if we discover errors while processing the result set - - short colCount = 0; //used to keep track of the number of columns received in a result set - short emitCount = 0; //used to keep track of the number of event emittions that have occurred - short errorCount = 0; //used to keep track of the number of errors that have been found - - SQLSMALLINT buflen; //used as a place holder for the length of column names - SQLRETURN ret; //used to capture the return value from various SQL function calls - - char *buf = (char *) malloc(MAX_VALUE_SIZE); //allocate a buffer for incoming column values - - //check to make sure malloc succeeded - if (buf == NULL) { - //malloc failed, set an error message - objError->Set(String::New("error"), String::New("[node-odbc] Failed Malloc")); - objError->Set(String::New("message"), String::New("An attempt to allocate memory failed. This allocation was for a value buffer of incoming recordset values.")); - - //emit an error event immidiately. 
- Local args[3]; - args[0] = objError; - args[1] = Local::New(Null()); - args[2] = Local::New(False()); - - //emit an error event - prep_req->cb->Call(Context::GetCurrent()->Global(), 3, args); - - //emit a result event - goto cleanupshutdown; - } - //else { - //malloc succeeded so let's continue -- I'm not too fond of having all this code in the else statement, but I don't know what else to do... - // you could use goto ;-) - - memset(buf,0,MAX_VALUE_SIZE); //set all of the bytes of the buffer to 0; I tried doing this inside the loop, but it increased processing time dramatically - - - //First thing, let's check if the execution of the query returned any errors (in EIO_Query) - if(req->result == SQL_ERROR) - { - errorCount++; - - char errorMessage[512]; - char errorSQLState[128]; - SQLError(self->m_hEnv, self->m_hDBC, self->m_hStmt,(SQLCHAR *)errorSQLState,NULL,(SQLCHAR *)errorMessage, sizeof(errorMessage), NULL); - objError->Set(String::New("state"), String::New(errorSQLState)); - objError->Set(String::New("error"), String::New("[node-odbc] SQL_ERROR")); - objError->Set(String::New("message"), String::New(errorMessage)); - - //only set the query value of the object if we actually have a query - if (prep_req->sql != NULL) { - objError->Set(String::New("query"), String::New(prep_req->sql)); - } - - //emit an error event immidiately. - Local args[1]; - args[0] = objError; - prep_req->cb->Call(Context::GetCurrent()->Global(), 1, args); - //self->Emit(String::New("error"), 1, args); - goto cleanupshutdown; - } - - //loop through all result sets - do { - colCount = 0; //always reset colCount for the current result set to 0; - - SQLNumResultCols(self->m_hStmt, &colCount); - Column *columns = new Column[colCount]; - - Local rows = Array::New(); - - if (colCount > 0) { - // retrieve and store column attributes to build the row object - for(int i = 0; i < colCount; i++) - { - columns[i].name = new unsigned char[MAX_FIELD_SIZE]; - - //zero out the space where the column name will be stored - memset(columns[i].name, 0, MAX_FIELD_SIZE); - - //get the column name - ret = SQLColAttribute(self->m_hStmt, (SQLUSMALLINT)i+1, SQL_DESC_LABEL, columns[i].name, (SQLSMALLINT)MAX_FIELD_SIZE, (SQLSMALLINT *)&buflen, NULL); - - //store the len attribute - columns[i].len = buflen; - - //get the column type and store it directly in column[i].type - ret = SQLColAttribute( self->m_hStmt, (SQLUSMALLINT)i+1, SQL_COLUMN_TYPE, NULL, 0, NULL, &columns[i].type ); - } - - int count = 0; - - // i dont think odbc will tell how many rows are returned, loop until out... - while(true) - { - Local tuple = Object::New(); - ret = SQLFetch(self->m_hStmt); - - //TODO: Do something to enable/disable dumping these info messages to the console. 
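
For reference, the pattern the removed `EIO_AfterQuery` leans on — `SQLNumResultCols` for the column count, `SQLColAttribute` for each label, then an `SQLFetch`/`SQLGetData` loop until `SQL_NO_DATA` — looks like this when reduced to a standalone sketch. This is an editor's illustration, not code from this diff; `hStmt` is assumed to be an already-executed statement handle, and every value is read as text for brevity.

```cpp
#include <sql.h>
#include <sqlext.h>
#include <stdio.h>

// Minimal sketch: walk one result set on a statement handle that has already
// been executed, reading every column value as a string.
void dump_result_set(SQLHSTMT hStmt) {
  SQLSMALLINT colCount = 0;
  SQLNumResultCols(hStmt, &colCount);                 // column count for this result set

  for (SQLUSMALLINT i = 1; i <= colCount; i++) {
    char label[256];
    SQLSMALLINT labelLen = 0;
    // SQL_DESC_LABEL is the attribute the addon uses for the JS property name.
    SQLColAttribute(hStmt, i, SQL_DESC_LABEL, label, sizeof(label), &labelLen, NULL);
    printf("%-20s", label);
  }
  printf("\n");

  while (SQLFetch(hStmt) == SQL_SUCCESS) {            // SQL_NO_DATA ends the loop
    for (SQLUSMALLINT i = 1; i <= colCount; i++) {
      char value[1024];
      SQLLEN ind = 0;
      SQLGetData(hStmt, i, SQL_C_CHAR, value, sizeof(value), &ind);
      printf("%-20s", ind == SQL_NULL_DATA ? "NULL" : value);
    }
    printf("\n");
  }
}
```
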
- if (ret == SQL_SUCCESS_WITH_INFO ) { - char errorMessage[512]; - char errorSQLState[128]; - SQLError(self->m_hEnv, self->m_hDBC, self->m_hStmt,(SQLCHAR *)errorSQLState,NULL,(SQLCHAR *)errorMessage, sizeof(errorMessage), NULL); - - //printf("EIO_Query ret => %i\n", ret); - printf("EIO_Query => %s\n", errorMessage); - printf("EIO_Query => %s\n", errorSQLState); - //printf("EIO_Query sql => %s\n", prep_req->sql); - } - - if (ret == SQL_ERROR) { - char errorMessage[512]; - char errorSQLState[128]; - SQLError(self->m_hEnv, self->m_hDBC, self->m_hStmt,(SQLCHAR *)errorSQLState,NULL,(SQLCHAR *)errorMessage, sizeof(errorMessage), NULL); - - errorCount++; - objError->Set(String::New("state"), String::New(errorSQLState)); - objError->Set(String::New("error"), String::New("[node-odbc] SQL_ERROR")); - objError->Set(String::New("message"), String::New(errorMessage)); - objError->Set(String::New("query"), String::New(prep_req->sql)); - - //emit an error event immidiately. - Local args[1]; - args[0] = objError; - prep_req->cb->Call(Context::GetCurrent()->Global(), 1, args); - - break; - } - - if (ret == SQL_NO_DATA) { - break; - } - - for(int i = 0; i < colCount; i++) - { - SQLLEN len; - - // SQLGetData can supposedly return multiple chunks, need to do this to retrieve large fields - int ret = SQLGetData(self->m_hStmt, i+1, SQL_CHAR, (char *) buf, MAX_VALUE_SIZE-1, (SQLLEN *) &len); - - //printf("%s %i\n", columns[i].name, columns[i].type); - - if(ret == SQL_NULL_DATA || len < 0) - { - tuple->Set(String::New((const char *)columns[i].name), Null()); - } - else - { - switch (columns[i].type) { - case SQL_NUMERIC : - tuple->Set(String::New((const char *)columns[i].name), Number::New(atof(buf))); - break; - case SQL_DECIMAL : - tuple->Set(String::New((const char *)columns[i].name), Number::New(atof(buf))); - break; - case SQL_INTEGER : - tuple->Set(String::New((const char *)columns[i].name), Number::New(atof(buf))); - break; - case SQL_SMALLINT : - tuple->Set(String::New((const char *)columns[i].name), Number::New(atof(buf))); - break; - case SQL_BIGINT : - tuple->Set(String::New((const char *)columns[i].name), Number::New(atof(buf))); - break; - case SQL_FLOAT : - tuple->Set(String::New((const char *)columns[i].name), Number::New(atof(buf))); - break; - case SQL_REAL : - tuple->Set(String::New((const char *)columns[i].name), Number::New(atof(buf))); - break; - case SQL_DOUBLE : - tuple->Set(String::New((const char *)columns[i].name), Number::New(atof(buf))); - break; - case SQL_DATETIME : - case SQL_TIMESTAMP : - //I am not sure if this is locale-safe or cross database safe, but it works for me on MSSQL - strptime(buf, "%Y-%m-%d %H:%M:%S", &timeInfo); - timeInfo.tm_isdst = -1; //a negative value means that mktime() should (use timezone information and system - //databases to) attempt to determine whether DST is in effect at the specified time. - - tuple->Set(String::New((const char *)columns[i].name), Date::New(double(mktime(&timeInfo)) * 1000)); - - break; - case SQL_BIT : - //again, i'm not sure if this is cross database safe, but it works for MSSQL - tuple->Set(String::New((const char *)columns[i].name), Boolean::New( ( *buf == '0') ? 
false : true )); - break; - default : - tuple->Set(String::New((const char *)columns[i].name), String::New(buf)); - break; - } - } - } - - rows->Set(Integer::New(count), tuple); - count++; - } - - for(int i = 0; i < colCount; i++) - { - delete [] columns[i].name; - } - - delete [] columns; - } - - //move to the next result set - ret = SQLMoreResults( self->m_hStmt ); - - if ( ret != SQL_SUCCESS ) { - //there are no more recordsets so free the statement now before we emit - //because as soon as we emit the last recordest, we are clear to submit another query - //which could cause a race condition with freeing and allocating handles. - SQLFreeHandle( SQL_HANDLE_STMT, self->m_hStmt ); - SQLAllocHandle( SQL_HANDLE_STMT, self->m_hDBC, &self->m_hStmt ); - } - - //Only trigger an emit if there are columns OR if this is the last result and none others have been emitted - //odbc will process individual statments like select @something = 1 as a recordset even though it doesn't have - //any columns. We don't want to emit those unless there are actually columns - if (colCount > 0 || ( ret != SQL_SUCCESS && emitCount == 0 )) { - emitCount++; - - Local args[3]; - - if (errorCount) { - args[0] = objError; - } - else { - args[0] = Local::New(Null()); - } - - args[1] = rows; - args[2] = Local::New(( ret == SQL_SUCCESS ) ? True() : False() ); //true or false, are there more result sets to follow this emit? - - prep_req->cb->Call(Context::GetCurrent()->Global(), 3, args); - } - } - while ( self->canHaveMoreResults && ret == SQL_SUCCESS ); - //} //end of malloc check -cleanupshutdown: - TryCatch try_catch; - - self->Unref(); - - if (try_catch.HasCaught()) { - FatalException(try_catch); - } - - free(buf); - prep_req->cb.Dispose(); - free(prep_req->sql); - free(prep_req->catalog); - free(prep_req->schema); - free(prep_req->table); - free(prep_req->type); - free(prep_req); - scope.Close(Undefined()); - return 0; -} - -void Database::EIO_Query(eio_req *req) { - struct query_request *prep_req = (struct query_request *)(req->data); - Parameter prm; - SQLRETURN ret; - - if(prep_req->dbo->m_hStmt) - { - SQLFreeHandle( SQL_HANDLE_STMT, prep_req->dbo->m_hStmt ); - SQLAllocStmt(prep_req->dbo->m_hDBC,&prep_req->dbo->m_hStmt ); - } - - //check to see if should excute a direct or a parameter bound query - if (!prep_req->paramCount) - { - // execute the query directly - ret = SQLExecDirect( prep_req->dbo->m_hStmt,(SQLCHAR *)prep_req->sql, strlen(prep_req->sql) ); - } - else - { - // prepare statement, bind parameters and execute statement - ret = SQLPrepare(prep_req->dbo->m_hStmt, (SQLCHAR *)prep_req->sql, strlen(prep_req->sql)); - if (ret == SQL_SUCCESS || ret == SQL_SUCCESS_WITH_INFO) - { - for (int i = 0; i < prep_req->paramCount; i++) - { - prm = prep_req->params[i]; - - ret = SQLBindParameter(prep_req->dbo->m_hStmt, i + 1, SQL_PARAM_INPUT, prm.c_type, prm.type, prm.size, 0, prm.buffer, prm.buffer_length, &prm.length); - if (ret == SQL_ERROR) {break;} - } - - if (ret == SQL_SUCCESS || ret == SQL_SUCCESS_WITH_INFO) { - ret = SQLExecute(prep_req->dbo->m_hStmt); - } - } - - // free parameters - // - for (int i = 0; i < prep_req->paramCount; i++) - { - if (prm = prep_req->params[i], prm.buffer != NULL) - { - switch (prm.c_type) - { - case SQL_C_CHAR: free(prm.buffer); break; - case SQL_C_LONG: delete (int64_t *)prm.buffer; break; - case SQL_C_DOUBLE: delete (double *)prm.buffer; break; - case SQL_C_BIT: delete (bool *)prm.buffer; break; - } - } - } - free(prep_req->params); - } - - req->result = ret; // this will be checked 
later in EIO_AfterQuery - -} - -Handle Database::Query(const Arguments& args) { - HandleScope scope; - - REQ_STR_ARG(0, sql); - - Local cb; - - int paramCount = 0; - Parameter* params; - - Database* dbo = ObjectWrap::Unwrap(args.This()); - - struct query_request *prep_req = (struct query_request *) - calloc(1, sizeof(struct query_request)); - - if (!prep_req) { - V8::LowMemoryNotification(); - return ThrowException(Exception::Error(String::New("Could not allocate enough memory"))); - } - - // populate prep_req->params if parameters were supplied - // - if (args.Length() > 2) - { - if ( !args[1]->IsArray() ) - { - return ThrowException(Exception::TypeError( - String::New("Argument 1 must be an Array")) - ); - } - else if ( !args[2]->IsFunction() ) - { - return ThrowException(Exception::TypeError( - String::New("Argument 2 must be a Function")) - ); - } - - - Local values = Local::Cast(args[1]); - cb = Local::Cast(args[2]); - - prep_req->paramCount = paramCount = values->Length(); - prep_req->params = params = new Parameter[paramCount]; - - for (int i = 0; i < paramCount; i++) - { - Local value = values->Get(i); - - params[i].size = 0; - params[i].length = NULL; - params[i].buffer_length = 0; - - if (value->IsString()) - { - String::Utf8Value string(value); - - params[i].c_type = SQL_C_CHAR; - params[i].type = SQL_VARCHAR; - params[i].length = SQL_NTS; - params[i].buffer = malloc(string.length() + 1); - params[i].buffer_length = string.length() + 1; - params[i].size = string.length() + 1; - - strcpy((char*)params[i].buffer, *string); - } - else if (value->IsNull()) - { - params[i].c_type = SQL_C_DEFAULT; - params[i].type = SQL_NULL_DATA; - params[i].length = SQL_NULL_DATA; - } - else if (value->IsInt32()) - { - int64_t *number = new int64_t(value->IntegerValue()); - params[i].c_type = SQL_C_LONG; - params[i].type = SQL_INTEGER; - params[i].buffer = number; - } - else if (value->IsNumber()) - { - double *number = new double(value->NumberValue()); - params[i].c_type = SQL_C_DOUBLE; - params[i].type = SQL_DECIMAL; - params[i].buffer = number; - } - else if (value->IsBoolean()) - { - bool *boolean = new bool(value->BooleanValue()); - params[i].c_type = SQL_C_BIT; - params[i].type = SQL_BIT; - params[i].buffer = boolean; - } - } - } - else - { - if ( !args[1]->IsFunction() ) - { - return ThrowException(Exception::TypeError( - String::New("Argument 1 must be a Function")) - ); - } - - cb = Local::Cast(args[1]); - - prep_req->paramCount = 0; - } - - prep_req->sql = (char *) malloc(sql.length() +1); - prep_req->catalog = NULL; - prep_req->schema = NULL; - prep_req->table = NULL; - prep_req->type = NULL; - prep_req->column = NULL; - prep_req->cb = Persistent::New(cb); - - strcpy(prep_req->sql, *sql); - - prep_req->dbo = dbo; - - eio_custom(EIO_Query, EIO_PRI_DEFAULT, EIO_AfterQuery, prep_req); - - ev_ref(EV_DEFAULT_UC); - dbo->Ref(); - scope.Close(Undefined()); - return Undefined(); -} - -void Database::EIO_Tables(eio_req *req) { - struct query_request *prep_req = (struct query_request *)(req->data); - - if(prep_req->dbo->m_hStmt) - { - SQLFreeHandle( SQL_HANDLE_STMT, prep_req->dbo->m_hStmt ); - SQLAllocStmt(prep_req->dbo->m_hDBC,&prep_req->dbo->m_hStmt ); - } - - SQLRETURN ret = SQLTables( - prep_req->dbo->m_hStmt, - (SQLCHAR *) prep_req->catalog, SQL_NTS, - (SQLCHAR *) prep_req->schema, SQL_NTS, - (SQLCHAR *) prep_req->table, SQL_NTS, - (SQLCHAR *) prep_req->type, SQL_NTS - ); - - req->result = ret; // this will be checked later in EIO_AfterQuery - - -} - -Handle Database::Tables(const Arguments& 
args) { - HandleScope scope; - - REQ_STR_OR_NULL_ARG(0, catalog); - REQ_STR_OR_NULL_ARG(1, schema); - REQ_STR_OR_NULL_ARG(2, table); - REQ_STR_OR_NULL_ARG(3, type); - Local cb = Local::Cast(args[4]); - - Database* dbo = ObjectWrap::Unwrap(args.This()); - - struct query_request *prep_req = (struct query_request *) - calloc(1, sizeof(struct query_request)); - - if (!prep_req) { - V8::LowMemoryNotification(); - return ThrowException(Exception::Error(String::New("Could not allocate enough memory"))); - } - - prep_req->sql = NULL; - prep_req->catalog = NULL; - prep_req->schema = NULL; - prep_req->table = NULL; - prep_req->type = NULL; - prep_req->column = NULL; - prep_req->cb = Persistent::New(cb); - - if (!String::New(*catalog)->Equals(String::New("null"))) { - prep_req->catalog = (char *) malloc(catalog.length() +1); - strcpy(prep_req->catalog, *catalog); - } - - if (!String::New(*schema)->Equals(String::New("null"))) { - prep_req->schema = (char *) malloc(schema.length() +1); - strcpy(prep_req->schema, *schema); - } - - if (!String::New(*table)->Equals(String::New("null"))) { - prep_req->table = (char *) malloc(table.length() +1); - strcpy(prep_req->table, *table); - } - - if (!String::New(*type)->Equals(String::New("null"))) { - prep_req->type = (char *) malloc(type.length() +1); - strcpy(prep_req->type, *type); - } - - prep_req->dbo = dbo; - - eio_custom(EIO_Tables, EIO_PRI_DEFAULT, EIO_AfterQuery, prep_req); - - ev_ref(EV_DEFAULT_UC); - dbo->Ref(); - scope.Close(Undefined()); - return Undefined(); -} - -void Database::EIO_Columns(eio_req *req) { - struct query_request *prep_req = (struct query_request *)(req->data); - - if(prep_req->dbo->m_hStmt) - { - SQLFreeHandle( SQL_HANDLE_STMT, prep_req->dbo->m_hStmt ); - SQLAllocStmt(prep_req->dbo->m_hDBC,&prep_req->dbo->m_hStmt ); - } - - SQLRETURN ret = SQLColumns( - prep_req->dbo->m_hStmt, - (SQLCHAR *) prep_req->catalog, SQL_NTS, - (SQLCHAR *) prep_req->schema, SQL_NTS, - (SQLCHAR *) prep_req->table, SQL_NTS, - (SQLCHAR *) prep_req->column, SQL_NTS - ); - - req->result = ret; // this will be checked later in EIO_AfterQuery - -} - -Handle Database::Columns(const Arguments& args) { - HandleScope scope; - - REQ_STR_OR_NULL_ARG(0, catalog); - REQ_STR_OR_NULL_ARG(1, schema); - REQ_STR_OR_NULL_ARG(2, table); - REQ_STR_OR_NULL_ARG(3, column); - Local cb = Local::Cast(args[4]); - - Database* dbo = ObjectWrap::Unwrap(args.This()); - - struct query_request *prep_req = (struct query_request *) - calloc(1, sizeof(struct query_request)); - - if (!prep_req) { - V8::LowMemoryNotification(); - return ThrowException(Exception::Error(String::New("Could not allocate enough memory"))); - } - - prep_req->sql = NULL; - prep_req->catalog = NULL; - prep_req->schema = NULL; - prep_req->table = NULL; - prep_req->type = NULL; - prep_req->column = NULL; - prep_req->cb = Persistent::New(cb); - - if (!String::New(*catalog)->Equals(String::New("null"))) { - prep_req->catalog = (char *) malloc(catalog.length() +1); - strcpy(prep_req->catalog, *catalog); - } - - if (!String::New(*schema)->Equals(String::New("null"))) { - prep_req->schema = (char *) malloc(schema.length() +1); - strcpy(prep_req->schema, *schema); - } - - if (!String::New(*table)->Equals(String::New("null"))) { - prep_req->table = (char *) malloc(table.length() +1); - strcpy(prep_req->table, *table); - } - - if (!String::New(*column)->Equals(String::New("null"))) { - prep_req->column = (char *) malloc(column.length() +1); - strcpy(prep_req->column, *column); - } - - prep_req->dbo = dbo; - - 
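
Both `EIO_Tables` and `EIO_Columns` reuse `EIO_AfterQuery`, because `SQLTables` and `SQLColumns` simply populate the statement handle with an ordinary result set. A standalone sketch of the same idea follows; it is an editorial illustration, not code from this diff, and the `"TABLE"` type filter and hardcoded column index are assumptions.

```cpp
#include <sql.h>
#include <sqlext.h>
#include <stdio.h>

// Minimal sketch: list base tables through the ODBC catalog API. SQLTables
// fills hStmt with a result set whose third column is TABLE_NAME, so it can
// be fetched exactly like a normal query result.
void list_tables(SQLHDBC hDbc) {
  SQLHSTMT hStmt = SQL_NULL_HSTMT;
  SQLAllocHandle(SQL_HANDLE_STMT, hDbc, &hStmt);

  SQLRETURN ret = SQLTables(hStmt,
                            NULL, 0,                       // catalog: any
                            NULL, 0,                       // schema: any
                            NULL, 0,                       // table name: any
                            (SQLCHAR *)"TABLE", SQL_NTS);  // type filter (assumption)
  if (SQL_SUCCEEDED(ret)) {
    while (SQLFetch(hStmt) == SQL_SUCCESS) {
      char name[256];
      SQLLEN ind = 0;
      SQLGetData(hStmt, 3, SQL_C_CHAR, name, sizeof(name), &ind);  // column 3 = TABLE_NAME
      printf("%s\n", ind == SQL_NULL_DATA ? "" : name);
    }
  }
  SQLFreeHandle(SQL_HANDLE_STMT, hStmt);
}
```
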
eio_custom(EIO_Columns, EIO_PRI_DEFAULT, EIO_AfterQuery, prep_req); - - ev_ref(EV_DEFAULT_UC); - dbo->Ref(); - scope.Close(Undefined()); - return Undefined(); -} - -void Database::printError(const char *fn, SQLHANDLE handle, SQLSMALLINT type) -{ - SQLINTEGER i = 0; - SQLINTEGER native; - SQLCHAR state[ 7 ]; - SQLCHAR text[256]; - SQLSMALLINT len; - SQLRETURN ret; - - fprintf(stderr, - "\n" - "The driver reported the following diagnostics whilst running " - "%s\n\n", - fn - ); - - do { - ret = SQLGetDiagRec(type, handle, ++i, state, &native, text, sizeof(text), &len ); - if (SQL_SUCCEEDED(ret)) - printf("%s:%ld:%ld:%s\n", state, (long int) i, (long int) native, text); - } - while( ret == SQL_SUCCESS ); -} - - -Persistent Database::constructor_template; - -extern "C" void init (v8::Handle target) { - Database::Init(target); -} diff --git a/src/Database.h b/src/Database.h deleted file mode 100644 index 66a5976d..00000000 --- a/src/Database.h +++ /dev/null @@ -1,157 +0,0 @@ -/* - Copyright (c) 2010, Lee Smith - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ - -#ifndef DATABASE_H -#define DATABASE_H - -#include -#include - -#include -#include -#include -#include - -#include - -using namespace v8; -using namespace node; - -class Database : public node::ObjectWrap { - public: - static Persistent constructor_template; - static void Init(v8::Handle target); - static pthread_mutex_t m_odbcMutex; - - protected: - Database() { } - - ~Database() { - } - - static Handle New(const Arguments& args); - - static int EIO_AfterOpen(eio_req *req); - static void EIO_Open(eio_req *req); - static Handle Open(const Arguments& args); - - static int EIO_AfterClose(eio_req *req); - static void EIO_Close(eio_req *req); - static Handle Close(const Arguments& args); - - static int EIO_AfterQuery(eio_req *req); - static void EIO_Query(eio_req *req); - static Handle Query(const Arguments& args); - - static void EIO_Tables(eio_req *req); - static Handle Tables(const Arguments& args); - - static void EIO_Columns(eio_req *req); - static Handle Columns(const Arguments& args); - - Database *self(void) { return this; } - void printError(const char *fn, SQLHANDLE handle, SQLSMALLINT type); - - protected: - HENV m_hEnv; - HDBC m_hDBC; - HSTMT m_hStmt; - SQLUSMALLINT canHaveMoreResults; -}; - -enum ExecMode - { - EXEC_EMPTY = 0, - EXEC_LAST_INSERT_ID = 1, - EXEC_AFFECTED_ROWS = 2 - }; - -struct open_request { - Persistent cb; - Database *dbo; - char connection[1]; -}; - -struct close_request { - Persistent cb; - Database *dbo; -}; - -typedef struct { - SQLSMALLINT c_type; - SQLSMALLINT type; - SQLLEN size; - void *buffer; - SQLLEN buffer_length; - SQLLEN length; -} Parameter; - -struct query_request { - Persistent cb; - Database *dbo; - int affectedRows; - char *sql; - char *catalog; - char *schema; - char *table; - char *type; - char *column; - Parameter *params; - 
int paramCount; -}; - -#define REQ_ARGS(N) \ - if (args.Length() < (N)) \ - return ThrowException(Exception::TypeError( \ - String::New("Expected " #N "arguments"))); - -#define REQ_STR_ARG(I, VAR) \ - if (args.Length() <= (I) || !args[I]->IsString()) \ - return ThrowException(Exception::TypeError( \ - String::New("Argument " #I " must be a string"))); \ - String::Utf8Value VAR(args[I]->ToString()); - -#define REQ_STR_OR_NULL_ARG(I, VAR) \ - if ( args.Length() <= (I) || (!args[I]->IsString() && !args[I]->IsNull()) ) \ - return ThrowException(Exception::TypeError( \ - String::New("Argument " #I " must be a string or null"))); \ - String::Utf8Value VAR(args[I]->ToString()); - -#define REQ_FUN_ARG(I, VAR) \ - if (args.Length() <= (I) || !args[I]->IsFunction()) \ - return ThrowException(Exception::TypeError( \ - String::New("Argument " #I " must be a function"))); \ - Local VAR = Local::Cast(args[I]); - -#define REQ_EXT_ARG(I, VAR) \ - if (args.Length() <= (I) || !args[I]->IsExternal()) \ - return ThrowException(Exception::TypeError( \ - String::New("Argument " #I " invalid"))); \ - Local VAR = Local::Cast(args[I]); - -#define OPT_INT_ARG(I, VAR, DEFAULT) \ - int VAR; \ - if (args.Length() <= (I)) { \ - VAR = (DEFAULT); \ - } else if (args[I]->IsInt32()) { \ - VAR = args[I]->Int32Value(); \ - } else { \ - return ThrowException(Exception::TypeError( \ - String::New("Argument " #I " must be an integer"))); \ - } - - -#endif diff --git a/src/dynodbc.cpp b/src/dynodbc.cpp new file mode 100644 index 00000000..58c3e5fa --- /dev/null +++ b/src/dynodbc.cpp @@ -0,0 +1,189 @@ +#ifdef dynodbc + +#include "dynodbc.h" +#include + +#ifdef _WIN32 + #include +#elif defined(__GNUC__) // GNU compiler + #include +#else +#error define your copiler +#endif + +#include +/* +#define RTLD_LAZY 1 +#define RTLD_NOW 2 +#define RTLD_GLOBAL 4 +*/ + +void* LoadSharedLibrary(char *pcDllname, int iMode = 2) +{ + std::string sDllName = pcDllname; +#ifdef _WIN32 + sDllName += ".dll"; + return (void*)LoadLibraryA(pcDllname); +#elif defined(__GNUC__) // GNU compiler + sDllName += ".so"; + void* handle = dlopen(sDllName.c_str(),iMode); + + if (!handle) { + printf("node-odbc: error loading ODBC library: %s\n", dlerror()); + } + + return handle; +#endif +} + +void* GetFunction(void *Lib, char *Fnname) +{ +#if defined(_MSC_VER) // Microsoft compiler + return (void*)GetProcAddress((HINSTANCE)Lib,Fnname); +#elif defined(__GNUC__) // GNU compiler + void * tmp = dlsym(Lib, Fnname); + if (!tmp) { + printf("node-odbc: error loading function: %s\n", Fnname); + } + return tmp; +#endif +} + +bool FreeSharedLibrary(void *hDLL) +{ +#if defined(_MSC_VER) // Microsoft compiler + return (FreeLibrary((HINSTANCE)hDLL)!=0); +#elif defined(__GNUC__) // GNU compiler + return dlclose(hDLL); +#endif +} + +pfnSQLGetData pSQLGetData; +pfnSQLGetFunctions pSQLGetFunctions; +pfnSQLAllocConnect pSQLAllocConnect; +pfnSQLAllocEnv pSQLAllocEnv; +pfnSQLAllocStmt pSQLAllocStmt; +pfnSQLBindCol pSQLBindCol; +pfnSQLCancel pSQLCancel; +pfnSQLColAttributes pSQLColAttributes; +pfnSQLConnect pSQLConnect; +pfnSQLDescribeCol pSQLDescribeCol; +pfnSQLDisconnect pSQLDisconnect; +pfnSQLError pSQLError; +pfnSQLExecDirect pSQLExecDirect; +pfnSQLExecute pSQLExecute; +pfnSQLFetch pSQLFetch; +pfnSQLGetDiagRec pSQLGetDiagRec; +pfnSQLGetDiagField pSQLGetDiagField; +pfnSQLFreeHandle pSQLFreeHandle; +pfnSQLFetchScroll pSQLFetchScroll; +pfnSQLColAttribute pSQLColAttribute; +pfnSQLSetConnectAttr pSQLSetConnectAttr; +pfnSQLDriverConnect pSQLDriverConnect; +pfnSQLAllocHandle 
pSQLAllocHandle; +pfnSQLRowCount pSQLRowCount; +pfnSQLNumResultCols pSQLNumResultCols; +pfnSQLEndTran pSQLEndTran; +pfnSQLTables pSQLTables; +pfnSQLColumns pSQLColumns; +pfnSQLBindParameter pSQLBindParameter; +pfnSQLPrimaryKeys pSQLPrimaryKeys; +pfnSQLSetEnvAttr pSQLSetEnvAttr ; +pfnSQLFreeConnect pSQLFreeConnect; +pfnSQLFreeEnv pSQLFreeEnv; +pfnSQLFreeStmt pSQLFreeStmt; +pfnSQLGetCursorName pSQLGetCursorName; +pfnSQLPrepare pSQLPrepare; +pfnSQLSetCursorName pSQLSetCursorName; +pfnSQLTransact pSQLTransact; +pfnSQLSetConnectOption pSQLSetConnectOption; +pfnSQLDrivers pSQLDrivers; +pfnSQLDataSources pSQLDataSources; +pfnSQLGetInfo pSQLGetInfo; +pfnSQLMoreResults pSQLMoreResults; + +//#define LOAD_ENTRY( hMod, Name ) (p##Name = (pfn##Name) GetProcAddress( (hMod), #Name )) +#define LOAD_ENTRY( hMod, Name ) (p##Name = (pfn##Name) GetFunction( (hMod), #Name )) + +static BOOL s_fODBCLoaded = false; + +BOOL DynLoadODBC( char* odbcModuleName ) +{ +#ifdef _WIN32 + HMODULE hMod; +#elif defined(__GNUC__) // GNU compiler + void* hMod; +#endif + + if ( s_fODBCLoaded ) + return true; + + // if ( (hMod = (HMODULE) LoadLibrary( odbcModuleName ))) { +#ifdef _WIN32 + if ( (hMod = (HMODULE) LoadSharedLibrary( odbcModuleName ))) { +#elif defined(__GNUC__) // GNU compiler + if ( (hMod = (void *) LoadSharedLibrary( odbcModuleName ))) { +#endif + +//#if (ODBCVER < 0x0300) + if (LOAD_ENTRY( hMod, SQLGetData ) ) + if (LOAD_ENTRY( hMod, SQLGetFunctions ) ) + //Unused-> if (LOAD_ENTRY( hMod, SQLAllocConnect ) ) + //Unused-> if (LOAD_ENTRY( hMod, SQLAllocEnv ) ) + //Unused-> if (LOAD_ENTRY( hMod, SQLAllocStmt ) ) + //Unused-> if (LOAD_ENTRY( hMod, SQLColAttributes ) ) + //Unused-> if (LOAD_ENTRY( hMod, SQLError ) ) + //Unused-> if (LOAD_ENTRY( hMod, SQLFreeConnect ) ) + //Unused-> if (LOAD_ENTRY( hMod, SQLFreeEnv ) ) + //Unused-> if (LOAD_ENTRY( hMod, SQLTransact ) ) + //Unused-> if (LOAD_ENTRY( hMod, SQLSetConnectOption ) ) +/* + * NOTE: This is commented out because it wouldn't be used + * in a direct-to-driver situation and we currently never + * call SQLDrivers. But if we ever do we may need to have + * some type of flag to determine if we should try to load + * this function if the user is not doing a direct-to-driver + * and is specifying a specific libodbc library. 
+ */ +//Unused-> if (LOAD_ENTRY( hMod, SQLDrivers ) ) + + //Unused-> if (LOAD_ENTRY( hMod, SQLDataSources ) ) +//#endif + //Unused-> if (LOAD_ENTRY( hMod, SQLBindCol ) ) + //Unused-> if (LOAD_ENTRY( hMod, SQLCancel ) ) + //Unused-> if (LOAD_ENTRY( hMod, SQLConnect ) ) + //Unused-> if (LOAD_ENTRY( hMod, SQLDescribeCol ) ) + if (LOAD_ENTRY( hMod, SQLDisconnect ) ) + if (LOAD_ENTRY( hMod, SQLExecDirect ) ) + if (LOAD_ENTRY( hMod, SQLExecute ) ) + if (LOAD_ENTRY( hMod, SQLFetch ) ) + if (LOAD_ENTRY( hMod, SQLGetDiagRec ) ) + if (LOAD_ENTRY( hMod, SQLGetDiagField ) ) + if (LOAD_ENTRY( hMod, SQLFreeHandle ) ) + //Unused-> if (LOAD_ENTRY( hMod, SQLFetchScroll ) ) + if (LOAD_ENTRY( hMod, SQLColAttribute ) ) + if (LOAD_ENTRY( hMod, SQLSetConnectAttr ) ) + if (LOAD_ENTRY( hMod, SQLDriverConnect ) ) + if (LOAD_ENTRY( hMod, SQLAllocHandle ) ) + if (LOAD_ENTRY( hMod, SQLRowCount ) ) + if (LOAD_ENTRY( hMod, SQLNumResultCols ) ) + if (LOAD_ENTRY( hMod, SQLEndTran ) ) + if (LOAD_ENTRY( hMod, SQLTables ) ) + if (LOAD_ENTRY( hMod, SQLColumns ) ) + if (LOAD_ENTRY( hMod, SQLBindParameter ) ) + //Unused-> if (LOAD_ENTRY( hMod, SQLPrimaryKeys) ) + if (LOAD_ENTRY( hMod, SQLSetEnvAttr ) ) + if (LOAD_ENTRY( hMod, SQLFreeStmt ) ) + if (LOAD_ENTRY( hMod, SQLPrepare ) ) + //Unused-> if (LOAD_ENTRY( hMod, SQLGetInfo ) ) + if (LOAD_ENTRY( hMod, SQLBindParameter ) ) + if (LOAD_ENTRY( hMod, SQLMoreResults ) + ) { + + s_fODBCLoaded = true; + } + } + + return (s_fODBCLoaded); +} +#endif diff --git a/src/dynodbc.h b/src/dynodbc.h new file mode 100644 index 00000000..083e18aa --- /dev/null +++ b/src/dynodbc.h @@ -0,0 +1,383 @@ +#ifndef _SRC_DYNODBC_H_ +#define _SRC_DYNODBC_H_ + +#ifdef dynodbc + +#ifdef _WIN32 +#include +#endif +#include +#include + +typedef RETCODE (SQL_API * pfnSQLGetData)( + SQLHSTMT StatementHandle, + SQLUSMALLINT Col_or_Param_Num, + SQLSMALLINT TargetType, + SQLPOINTER TargetValuePtr, + SQLLEN BufferLength, + SQLLEN * StrLen_or_IndPtr); + +typedef RETCODE (SQL_API * pfnSQLGetFunctions)( + HDBC ConnectionHandle, + SQLUSMALLINT FunctionId, + SQLUSMALLINT * SupportedPtr); + +typedef RETCODE (SQL_API * pfnSQLAllocConnect)( + HENV henv, + HDBC FAR *phdbc); + +typedef RETCODE (SQL_API * pfnSQLAllocEnv)( + HENV FAR *phenv); + +typedef RETCODE (SQL_API * pfnSQLAllocStmt)( + HDBC hdbc, + HSTMT FAR *phstmt); + +typedef RETCODE (SQL_API * pfnSQLBindCol)( + HSTMT hstmt, + UWORD icol, + SWORD fCType, + PTR rgbValue, + SDWORD cbValueMax, + SDWORD FAR *pcbValue); + +typedef RETCODE (SQL_API * pfnSQLCancel)( + HSTMT hstmt); + +typedef RETCODE (SQL_API * pfnSQLColAttributes)( + HSTMT hstmt, + UWORD icol, + UWORD fDescType, + PTR rgbDesc, + SWORD cbDescMax, + SWORD FAR *pcbDesc, + SDWORD FAR *pfDesc); + + +typedef RETCODE (SQL_API * pfnSQLColAttribute)( + SQLHSTMT StatementHandle, + SQLUSMALLINT ColumnNumber, SQLUSMALLINT FieldIdentifier, + SQLPOINTER CharacterAttribute, SQLSMALLINT BufferLength, + SQLSMALLINT *StringLength, SQLPOINTER NumericAttribute); + +typedef RETCODE (SQL_API * pfnSQLSetConnectAttr)( + SQLHDBC ConnectionHandle, + SQLINTEGER Attribute, SQLPOINTER Value, + SQLINTEGER StringLength); + +typedef RETCODE (SQL_API * pfnSQLDriverConnect)( + SQLHDBC hdbc, + SQLHWND hwnd, + SQLTCHAR *szConnStrIn, + SQLSMALLINT cbConnStrIn, + SQLTCHAR *szConnStrOut, + SQLSMALLINT cbConnStrOutMax, + SQLSMALLINT *pcbConnStrOut, + SQLUSMALLINT fDriverCompletion); + +typedef RETCODE (SQL_API * pfnSQLAllocHandle)( + SQLSMALLINT HandleType, + SQLHANDLE InputHandle, SQLHANDLE *OutputHandle); + +typedef RETCODE (SQL_API * 
pfnSQLRowCount)( + SQLHSTMT StatementHandle, + SQLLEN *RowCount); + +typedef RETCODE (SQL_API * pfnSQLNumResultCols)( + SQLHSTMT StatementHandle, + SQLSMALLINT *ColumnCount); + +typedef RETCODE (SQL_API * pfnSQLEndTran)( + SQLSMALLINT HandleType, SQLHANDLE Handle, + SQLSMALLINT CompletionType); + +typedef RETCODE (SQL_API * pfnSQLExecDirect)( + SQLHSTMT StatementHandle, + SQLTCHAR *StatementText, SQLINTEGER TextLength); + + +typedef RETCODE (SQL_API * pfnSQLTables)( + SQLHSTMT StatementHandle, + SQLTCHAR *CatalogName, SQLSMALLINT NameLength1, + SQLTCHAR *SchemaName, SQLSMALLINT NameLength2, + SQLTCHAR *TableName, SQLSMALLINT NameLength3, + SQLTCHAR *TableType, SQLSMALLINT NameLength4); + +typedef RETCODE (SQL_API * pfnSQLColumns)( + SQLHSTMT StatementHandle, + SQLTCHAR *CatalogName, SQLSMALLINT NameLength1, + SQLTCHAR *SchemaName, SQLSMALLINT NameLength2, + SQLTCHAR *TableName, SQLSMALLINT NameLength3, + SQLTCHAR *ColumnName, SQLSMALLINT NameLength4); + +typedef RETCODE (SQL_API * pfnSQLBindParameter)( + SQLHSTMT hstmt, + SQLUSMALLINT ipar, + SQLSMALLINT fParamType, + SQLSMALLINT fCType, + SQLSMALLINT fSqlType, + SQLUINTEGER cbColDef, + SQLSMALLINT ibScale, + SQLPOINTER rgbValue, + SQLINTEGER cbValueMax, + SQLLEN *pcbValue); + +typedef RETCODE (SQL_API * pfnSQLPrimaryKeys)( + SQLHSTMT hstmt, + SQLTCHAR *szCatalogName, + SQLSMALLINT cbCatalogName, + SQLTCHAR *szSchemaName, + SQLSMALLINT cbSchemaName, + SQLTCHAR *szTableName, + SQLSMALLINT cbTableName); + +typedef RETCODE (SQL_API * pfnSQLSetEnvAttr)( + SQLHENV EnvironmentHandle, + SQLINTEGER Attribute, SQLPOINTER Value, + SQLINTEGER StringLength); + + +typedef RETCODE (SQL_API * pfnSQLConnect)( + HDBC hdbc, + UCHAR FAR *szDSN, + SWORD cbDSN, + UCHAR FAR *szUID, + SWORD cbUID, + UCHAR FAR *szAuthStr, + SWORD cbAuthStr); + +typedef RETCODE (SQL_API * pfnSQLDescribeCol)( + HSTMT hstmt, + UWORD icol, + UCHAR FAR *szColName, + SWORD cbColNameMax, + SWORD FAR *pcbColName, + SWORD FAR *pfSqlType, + UDWORD FAR *pcbColDef, + SWORD FAR *pibScale, + SWORD FAR *pfNullable); + +typedef RETCODE (SQL_API * pfnSQLDisconnect)( + HDBC hdbc); + +typedef RETCODE (SQL_API * pfnSQLError)( + HENV henv, + HDBC hdbc, + HSTMT hstmt, + UCHAR FAR *szSqlState, + SDWORD FAR *pfNativeError, + UCHAR FAR *szErrorMsg, + SWORD cbErrorMsgMax, + SWORD FAR *pcbErrorMsg); + +/*typedef RETCODE (SQL_API * pfnSQLExecDirect)( + HSTMT hstmt, + UCHAR FAR *szSqlStr, + SDWORD cbSqlStr); +*/ +typedef RETCODE (SQL_API * pfnSQLExecute)( + HSTMT hstmt); + +typedef RETCODE (SQL_API * pfnSQLFetch)( + HSTMT hstmt); + +typedef RETCODE (SQL_API * pfnSQLGetDiagRec)( + SQLSMALLINT HandleType, SQLHANDLE Handle, + SQLSMALLINT RecNumber, SQLTCHAR *Sqlstate, + SQLINTEGER *NativeError, SQLTCHAR *MessageText, + SQLSMALLINT BufferLength, SQLSMALLINT *TextLength); + +typedef RETCODE (SQL_API * pfnSQLGetDiagField)( + SQLSMALLINT HandleType, SQLHANDLE Handle, + SQLSMALLINT RecNumber, SQLSMALLINT DiagIdentifier, + SQLPOINTER DiagInfoPtr, SQLSMALLINT BufferLength, + SQLSMALLINT *StringLengthPtr); + +typedef RETCODE (SQL_API * pfnSQLFreeHandle)( + SQLSMALLINT HandleType, SQLHANDLE Handle); + +typedef RETCODE (SQL_API * pfnSQLFetchScroll)( + SQLHSTMT StatementHandle, + SQLSMALLINT FetchOrientation, SQLINTEGER FetchOffset); + +typedef RETCODE (SQL_API * pfnSQLColAttribute)( + SQLHSTMT StatementHandle, + SQLUSMALLINT ColumnNumber, SQLUSMALLINT FieldIdentifier, + SQLPOINTER CharacterAttribute, SQLSMALLINT BufferLength, + SQLSMALLINT *StringLength, SQLPOINTER NumericAttribute); + + +typedef RETCODE (SQL_API * 
pfnSQLFreeConnect)( + HDBC hdbc); + +typedef RETCODE (SQL_API * pfnSQLFreeEnv)( + HENV henv); + +typedef RETCODE (SQL_API * pfnSQLFreeStmt)( + HSTMT hstmt, + UWORD fOption); + +typedef RETCODE (SQL_API * pfnSQLGetCursorName)( + HSTMT hstmt, + UCHAR FAR *szCursor, + SWORD cbCursorMax, + SWORD FAR *pcbCursor); + +typedef RETCODE (SQL_API * pfnSQLNumResultCols)( + HSTMT hstmt, + SWORD FAR *pccol); + +typedef RETCODE (SQL_API * pfnSQLPrepare)( + SQLHSTMT StatementHandle, + SQLTCHAR *StatementText, + SQLINTEGER TextLength); +// HSTMT hstmt, +// UCHAR FAR *szSqlStr, +// SDWORD cbSqlStr); + +typedef RETCODE (SQL_API * pfnSQLRowCount)( + HSTMT hstmt, + SQLLEN FAR *pcrow); + +typedef RETCODE (SQL_API * pfnSQLSetCursorName)( + HSTMT hstmt, + UCHAR FAR *szCursor, + SWORD cbCursor); + +typedef RETCODE (SQL_API * pfnSQLTransact)( + HENV henv, + HDBC hdbc, + UWORD fType); + +typedef RETCODE (SQL_API * pfnSQLSetConnectOption)( + HDBC hdbc, + UWORD fOption, + UDWORD vParam); + +typedef RETCODE (SQL_API * pfnSQLDrivers)( + HENV henv, + UWORD fDirection, + UCHAR FAR *szDriverDesc, + SWORD cbDriverDescMax, + SWORD FAR *pcbDriverDesc, + UCHAR FAR *szDriverAttributes, + SWORD cbDrvrAttrMax, + SWORD FAR *pcbDrvrAttr); + +// typedef RETCODE (SQL_API * pfnSQLBindParameter)( +// HSTMT hstmt, +// UWORD ipar, +// SWORD fParamType, +// SWORD fCType, +// SWORD fSqlType, +// UDWORD cbColDef, +// SWORD ibScale, +// PTR rgbValue, +// SDWORD cbValueMax, +// SDWORD FAR *pcbValue); + +typedef RETCODE (SQL_API * pfnSQLDataSources)( + HENV henv, + UWORD fDirection, + UCHAR FAR *szDSN, + SWORD cbDSNMax, + SWORD FAR *pcbDSN, + UCHAR FAR *szDescription, + SWORD cbDescriptionMax, + SWORD FAR *pcbDescription); + +typedef RETCODE (SQL_API * pfnSQLGetInfo)( + HDBC hdbc, + UWORD fInfoType, + PTR rgbInfoValue, + SWORD cbInfoValueMax, + SWORD FAR *pcbInfoValue); + +typedef RETCODE (SQL_API * pfnSQLMoreResults)( + HSTMT hstmt); + +extern pfnSQLGetData pSQLGetData; +extern pfnSQLGetFunctions pSQLGetFunctions; +extern pfnSQLAllocConnect pSQLAllocConnect; +extern pfnSQLAllocEnv pSQLAllocEnv; +extern pfnSQLAllocStmt pSQLAllocStmt; +extern pfnSQLBindCol pSQLBindCol; +extern pfnSQLCancel pSQLCancel; +extern pfnSQLColAttributes pSQLColAttributes; +extern pfnSQLConnect pSQLConnect; +extern pfnSQLDescribeCol pSQLDescribeCol; +extern pfnSQLDisconnect pSQLDisconnect; +extern pfnSQLError pSQLError; +extern pfnSQLExecDirect pSQLExecDirect; +extern pfnSQLExecute pSQLExecute; +extern pfnSQLFetch pSQLFetch; +extern pfnSQLGetDiagRec pSQLGetDiagRec; +extern pfnSQLGetDiagField pSQLGetDiagField; +extern pfnSQLFreeHandle pSQLFreeHandle; +extern pfnSQLFetchScroll pSQLFetchScroll; +extern pfnSQLFetchScroll pSQLFetchScroll; +extern pfnSQLColAttribute pSQLColAttribute; +extern pfnSQLSetConnectAttr pSQLSetConnectAttr; +extern pfnSQLDriverConnect pSQLDriverConnect; +extern pfnSQLAllocHandle pSQLAllocHandle; +extern pfnSQLRowCount pSQLRowCount; +extern pfnSQLNumResultCols pSQLNumResultCols; +extern pfnSQLEndTran pSQLEndTran; +//extern pfnSQLExecDirect pSQLExecDirect; +extern pfnSQLTables pSQLTables; +extern pfnSQLColumns pSQLColumns; +// extern pfnSQLBindParameter pSQLBindParameter; +extern pfnSQLPrimaryKeys pSQLPrimaryKeys; +extern pfnSQLSetEnvAttr pSQLSetEnvAttr; +extern pfnSQLFreeConnect pSQLFreeConnect; +extern pfnSQLFreeEnv pSQLFreeEnv; +extern pfnSQLFreeStmt pSQLFreeStmt; +extern pfnSQLGetCursorName pSQLGetCursorName; +extern pfnSQLNumResultCols pSQLNumResultCols; +extern pfnSQLPrepare pSQLPrepare; +extern pfnSQLRowCount pSQLRowCount; +extern 
pfnSQLSetCursorName pSQLSetCursorName; +extern pfnSQLTransact pSQLTransact; +extern pfnSQLSetConnectOption pSQLSetConnectOption; +extern pfnSQLDrivers pSQLDrivers; +extern pfnSQLDataSources pSQLDataSources; +extern pfnSQLBindParameter pSQLBindParameter; +extern pfnSQLGetInfo pSQLGetInfo; +extern pfnSQLMoreResults pSQLMoreResults; + +BOOL DynLoadODBC( char* odbcModuleName ); + +#define SQLAllocEnv pSQLAllocEnv +#define SQLAllocConnect pSQLAllocConnect +#define SQLSetConnectOption pSQLSetConnectOption +#define SQLAllocStmt pSQLAllocStmt +#define SQLGetFunctions pSQLGetFunctions +#define SQLError pSQLError +#define SQLGetData pSQLGetData +#define SQLMoreResults pSQLMoreResults +#define SQLPrepare pSQLPrepare +#define SQLExecute pSQLExecute +#define SQLGetDiagRec pSQLGetDiagRec +#define SQLGetDiagField pSQLGetDiagField +#define SQLFreeHandle pSQLFreeHandle +#define SQLFreeStmt pSQLFreeStmt +#define SQLFetchScroll pSQLFetchScroll +#define SQLFetch pSQLFetch +#define SQLBindCol pSQLBindCol +#define SQLColAttribute pSQLColAttribute +#define SQLGetInfo pSQLGetInfo +#define SQLDriverConnect pSQLDriverConnect +#define SQLAllocHandle pSQLAllocHandle +#define SQLDisconnect pSQLDisconnect +#define SQLRowCount pSQLRowCount +#define SQLNumResultCols pSQLNumResultCols +#define SQLSetConnectAttr pSQLSetConnectAttr +#define SQLEndTran pSQLEndTran +#define SQLExecDirect pSQLExecDirect +#define SQLTables pSQLTables +#define SQLColumns pSQLColumns +#define SQLBindParameter pSQLBindParameter +#define SQLPrimaryKeys pSQLPrimaryKeys +#define SQLSetEnvAttr pSQLSetEnvAttr +#endif +#endif // _SRC_DYNODBC_H_ diff --git a/src/odbc.cpp b/src/odbc.cpp new file mode 100644 index 00000000..114b1f4b --- /dev/null +++ b/src/odbc.cpp @@ -0,0 +1,984 @@ +/* + Copyright (c) 2013, Dan VerWeire + Copyright (c) 2010, Lee Smith + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ + +#include +#include +#include +#include +#include +#include + +#include "odbc.h" +#include "odbc_connection.h" +#include "odbc_result.h" +#include "odbc_statement.h" + +#ifdef dynodbc +#include "dynodbc.h" +#endif + +#ifdef _WIN32 +#include "strptime.h" +#endif + +using namespace v8; +using namespace node; + +uv_mutex_t ODBC::g_odbcMutex; + +Nan::Persistent ODBC::constructor; + +void ODBC::Init(v8::Handle exports) { + DEBUG_PRINTF("ODBC::Init\n"); + Nan::HandleScope scope; + + Local constructor_template = Nan::New(New); + + // Constructor Template + constructor_template->SetClassName(Nan::New("ODBC").ToLocalChecked()); + + // Reserve space for one Handle + Local instance_template = constructor_template->InstanceTemplate(); + instance_template->SetInternalFieldCount(1); + + // Constants +#if (NODE_MODULE_VERSION < NODE_0_12_MODULE_VERSION) + +#else + +#endif + PropertyAttribute constant_attributes = static_cast(ReadOnly | DontDelete); + constructor_template->Set(Nan::New("SQL_CLOSE").ToLocalChecked(), Nan::New(SQL_CLOSE), constant_attributes); + constructor_template->Set(Nan::New("SQL_DROP").ToLocalChecked(), Nan::New(SQL_DROP), constant_attributes); + constructor_template->Set(Nan::New("SQL_UNBIND").ToLocalChecked(), Nan::New(SQL_UNBIND), constant_attributes); + constructor_template->Set(Nan::New("SQL_RESET_PARAMS").ToLocalChecked(), Nan::New(SQL_RESET_PARAMS), constant_attributes); + constructor_template->Set(Nan::New("SQL_DESTROY").ToLocalChecked(), Nan::New(SQL_DESTROY), constant_attributes); + constructor_template->Set(Nan::New("FETCH_ARRAY").ToLocalChecked(), Nan::New(FETCH_ARRAY), constant_attributes); + constructor_template->Set(Nan::New("SQL_USER_NAME").ToLocalChecked(), Nan::New(SQL_USER_NAME), constant_attributes); + NODE_ODBC_DEFINE_CONSTANT(constructor_template, FETCH_OBJECT); + + // Prototype Methods + Nan::SetPrototypeMethod(constructor_template, "createConnection", CreateConnection); + Nan::SetPrototypeMethod(constructor_template, "createConnectionSync", CreateConnectionSync); + + // Attach the Database Constructor to the target object + constructor.Reset(constructor_template->GetFunction()); + exports->Set(Nan::New("ODBC").ToLocalChecked(), + constructor_template->GetFunction()); + + // Initialize the cross platform mutex provided by libuv + uv_mutex_init(&ODBC::g_odbcMutex); +} + +ODBC::~ODBC() { + DEBUG_PRINTF("ODBC::~ODBC\n"); + this->Free(); +} + +void ODBC::Free() { + DEBUG_PRINTF("ODBC::Free\n"); + if (m_hEnv) { + uv_mutex_lock(&ODBC::g_odbcMutex); + + if (m_hEnv) { + SQLFreeHandle(SQL_HANDLE_ENV, m_hEnv); + m_hEnv = NULL; + } + + uv_mutex_unlock(&ODBC::g_odbcMutex); + } +} + +NAN_METHOD(ODBC::New) { + DEBUG_PRINTF("ODBC::New\n"); + Nan::HandleScope scope; + ODBC* dbo = new ODBC(); + + dbo->Wrap(info.Holder()); + + dbo->m_hEnv = NULL; + + uv_mutex_lock(&ODBC::g_odbcMutex); + + // Initialize the Environment handle + int ret = SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, &dbo->m_hEnv); + + uv_mutex_unlock(&ODBC::g_odbcMutex); + + if (!SQL_SUCCEEDED(ret)) { + DEBUG_PRINTF("ODBC::New - ERROR ALLOCATING ENV HANDLE!!\n"); + + Local objError = ODBC::GetSQLError(SQL_HANDLE_ENV, dbo->m_hEnv); + + return Nan::ThrowError(objError); + } + + // Use ODBC 3.x behavior + SQLSetEnvAttr(dbo->m_hEnv, SQL_ATTR_ODBC_VERSION, (SQLPOINTER) SQL_OV_ODBC3, SQL_IS_UINTEGER); + + info.GetReturnValue().Set(info.Holder()); +} + +//void ODBC::WatcherCallback(uv_async_t *w, int revents) { +// DEBUG_PRINTF("ODBC::WatcherCallback\n"); +// //i don't know if we need to do anything here +//} + 
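
`ODBC::New` above owns the shared environment handle and forces ODBC 3.x behaviour; `CreateConnection`/`CreateConnectionSync` below then allocate one DBC handle per connection. Condensed into a single standalone sketch (an editorial illustration, not the addon's control flow — the connection string argument and the inline `SQLDriverConnect` are assumptions):

```cpp
#include <sql.h>
#include <sqlext.h>
#include <stdio.h>

// Minimal sketch: the handle sequence that New / CreateConnection spread
// across the class, done inline for a single connection.
int connect_once(const char *connStr) {
  SQLHENV hEnv = SQL_NULL_HENV;
  SQLHDBC hDbc = SQL_NULL_HDBC;

  SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, &hEnv);
  // Same attribute ODBC::New sets: ask the driver manager for ODBC 3.x behaviour.
  SQLSetEnvAttr(hEnv, SQL_ATTR_ODBC_VERSION, (SQLPOINTER)SQL_OV_ODBC3, 0);

  SQLAllocHandle(SQL_HANDLE_DBC, hEnv, &hDbc);

  SQLCHAR outStr[1024];
  SQLSMALLINT outLen = 0;
  SQLRETURN ret = SQLDriverConnect(hDbc, NULL,
                                   (SQLCHAR *)connStr, SQL_NTS,
                                   outStr, sizeof(outStr), &outLen,
                                   SQL_DRIVER_NOPROMPT);
  if (!SQL_SUCCEEDED(ret)) {
    // The addon builds an error object from the diagnostic records here;
    // this sketch just reports the failure.
    fprintf(stderr, "SQLDriverConnect failed (%d)\n", (int)ret);
  } else {
    SQLDisconnect(hDbc);
  }

  SQLFreeHandle(SQL_HANDLE_DBC, hDbc);
  SQLFreeHandle(SQL_HANDLE_ENV, hEnv);
  return SQL_SUCCEEDED(ret) ? 0 : 1;
}
```
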
+/* + * CreateConnection + */ + +NAN_METHOD(ODBC::CreateConnection) { + DEBUG_PRINTF("ODBC::CreateConnection\n"); + Nan::HandleScope scope; + + Local cb = info[0].As(); + Nan::Callback *callback = new Nan::Callback(cb); + //REQ_FUN_ARG(0, cb); + + ODBC* dbo = Nan::ObjectWrap::Unwrap(info.Holder()); + + //initialize work request + uv_work_t* work_req = (uv_work_t *) (calloc(1, sizeof(uv_work_t))); + + //initialize our data + create_connection_work_data* data = + (create_connection_work_data *) (calloc(1, sizeof(create_connection_work_data))); + + data->cb = callback; + data->dbo = dbo; + + work_req->data = data; + + uv_queue_work(uv_default_loop(), work_req, UV_CreateConnection, (uv_after_work_cb)UV_AfterCreateConnection); + + dbo->Ref(); + + info.GetReturnValue().Set(Nan::Undefined()); +} + +void ODBC::UV_CreateConnection(uv_work_t* req) { + DEBUG_PRINTF("ODBC::UV_CreateConnection\n"); + + //get our work data + create_connection_work_data* data = (create_connection_work_data *)(req->data); + + uv_mutex_lock(&ODBC::g_odbcMutex); + + //allocate a new connection handle + data->result = SQLAllocHandle(SQL_HANDLE_DBC, data->dbo->m_hEnv, &data->hDBC); + + uv_mutex_unlock(&ODBC::g_odbcMutex); +} + +void ODBC::UV_AfterCreateConnection(uv_work_t* req, int status) { + DEBUG_PRINTF("ODBC::UV_AfterCreateConnection\n"); + Nan::HandleScope scope; + + create_connection_work_data* data = (create_connection_work_data *)(req->data); + + Nan::TryCatch try_catch; + + if (!SQL_SUCCEEDED(data->result)) { + Local info[1]; + + info[0] = ODBC::GetSQLError(SQL_HANDLE_ENV, data->dbo->m_hEnv); + + data->cb->Call(1, info); + } + else { + Local info[2]; + info[0] = Nan::New(data->dbo->m_hEnv); + info[1] = Nan::New(data->hDBC); + + Local js_result = Nan::NewInstance(Nan::New(ODBCConnection::constructor), 2, info).ToLocalChecked(); + + info[0] = Nan::Null(); + info[1] = js_result; + + data->cb->Call(data->dbo->handle(), 2, info); + } + + if (try_catch.HasCaught()) { + Nan::FatalException(try_catch); + } + + + data->dbo->Unref(); + delete data->cb; + + free(data); + free(req); +} + +/* + * CreateConnectionSync + */ + +NAN_METHOD(ODBC::CreateConnectionSync) { + DEBUG_PRINTF("ODBC::CreateConnectionSync\n"); + Nan::HandleScope scope; + + ODBC* dbo = Nan::ObjectWrap::Unwrap(info.Holder()); + + HDBC hDBC; + + uv_mutex_lock(&ODBC::g_odbcMutex); + + //allocate a new connection handle + SQLRETURN ret = SQLAllocHandle(SQL_HANDLE_DBC, dbo->m_hEnv, &hDBC); + + if (!SQL_SUCCEEDED(ret)) { + //TODO: do something! + } + + uv_mutex_unlock(&ODBC::g_odbcMutex); + + Local params[2]; + params[0] = Nan::New(dbo->m_hEnv); + params[1] = Nan::New(hDBC); + + Local js_result = Nan::NewInstance(Nan::New(ODBCConnection::constructor), 2, params).ToLocalChecked(); + + info.GetReturnValue().Set(js_result); +} + +/* + * GetColumns + */ + +Column* ODBC::GetColumns(SQLHSTMT hStmt, short* colCount) { + SQLRETURN ret; + SQLSMALLINT buflen; + + //always reset colCount for the current result set to 0; + *colCount = 0; + + //get the number of columns in the result set + ret = SQLNumResultCols(hStmt, colCount); + + if (!SQL_SUCCEEDED(ret)) { + return new Column[0]; + } + + Column *columns = new Column[*colCount]; + + for (int i = 0; i < *colCount; i++) { + //save the index number of this column + columns[i].index = i + 1; + //TODO:that's a lot of memory for each field name.... 
+ columns[i].name = new unsigned char[MAX_FIELD_SIZE]; + + //set the first byte of name to \0 instead of memsetting the entire buffer + columns[i].name[0] = '\0'; + + //get the column name + ret = SQLColAttribute( hStmt, + columns[i].index, +#ifdef STRICT_COLUMN_NAMES + SQL_DESC_NAME, +#else + SQL_DESC_LABEL, +#endif + columns[i].name, + (SQLSMALLINT) MAX_FIELD_SIZE, + (SQLSMALLINT *) &buflen, + NULL); + + //store the len attribute + columns[i].len = buflen; + + //get the column type and store it directly in column[i].type + ret = SQLColAttribute( hStmt, + columns[i].index, + SQL_DESC_TYPE, + NULL, + 0, + NULL, + &columns[i].type); + } + + return columns; +} + +/* + * FreeColumns + */ + +void ODBC::FreeColumns(Column* columns, short* colCount) { + for(int i = 0; i < *colCount; i++) { + delete [] columns[i].name; + } + + delete [] columns; + + *colCount = 0; +} + +/* + * GetColumnValue + */ + +Handle ODBC::GetColumnValue( SQLHSTMT hStmt, Column column, + uint16_t* buffer, int bufferLength) { + Nan::EscapableHandleScope scope; + SQLLEN len = 0; + + //reset the buffer + buffer[0] = '\0'; + + //TODO: SQLGetData can supposedly return multiple chunks, need to do this to + //retrieve large fields + int ret; + + switch ((int) column.type) { + case SQL_INTEGER : + case SQL_SMALLINT : + case SQL_TINYINT : { + int32_t value = 0; + + ret = SQLGetData( + hStmt, + column.index, + SQL_C_SLONG, + &value, + sizeof(value), + &len); + + DEBUG_PRINTF("ODBC::GetColumnValue - Integer: index=%i name=%s type=%lli len=%lli ret=%i val=%li\n", + column.index, column.name, column.type, len, ret, value); + + if (len == SQL_NULL_DATA) { + return scope.Escape(Nan::Null()); + } + else { + return scope.Escape(Nan::New(value)); + } + } + break; + case SQL_NUMERIC : + case SQL_DECIMAL : + case SQL_BIGINT : + case SQL_FLOAT : + case SQL_REAL : + case SQL_DOUBLE : { + double value; + + ret = SQLGetData( + hStmt, + column.index, + SQL_C_DOUBLE, + &value, + sizeof(value), + &len); + + DEBUG_PRINTF("ODBC::GetColumnValue - Number: index=%i name=%s type=%lli len=%lli ret=%i val=%f\n", + column.index, column.name, column.type, len, ret, value); + + if (len == SQL_NULL_DATA) { + return scope.Escape(Nan::Null()); + //return Null(); + } + else { + return scope.Escape(Nan::New(value)); + //return Number::New(value); + } + } + break; + case SQL_DATETIME : + case SQL_TIMESTAMP : { + //I am not sure if this is locale-safe or cross database safe, but it + //works for me on MSSQL +#ifdef _WIN32 + struct tm timeInfo = {}; + + ret = SQLGetData( + hStmt, + column.index, + SQL_C_CHAR, + (char *) buffer, + bufferLength, + &len); + + DEBUG_PRINTF("ODBC::GetColumnValue - W32 Timestamp: index=%i name=%s type=%lli len=%lli\n", + column.index, column.name, column.type, len); + + if (len == SQL_NULL_DATA) { + return scope.Escape(Nan::Null()); + //return Null(); + } + else { + if (strptime((char *) buffer, "%Y-%m-%d %H:%M:%S", &timeInfo)) { + //a negative value means that mktime() should use timezone information + //and system databases to attempt to determine whether DST is in effect + //at the specified time. 
+ timeInfo.tm_isdst = -1; + + //return scope.Escape(Date::New(Isolate::GetCurrent(), (double(mktime(&timeInfo)) * 1000)); + return scope.Escape(Nan::New(double(mktime(&timeInfo)) * 1000).ToLocalChecked()); + } + else { + return scope.Escape(Nan::New((char *)buffer).ToLocalChecked()); + } + } +#else + struct tm timeInfo = { + tm_sec : 0 + , tm_min : 0 + , tm_hour : 0 + , tm_mday : 0 + , tm_mon : 0 + , tm_year : 0 + , tm_wday : 0 + , tm_yday : 0 + , tm_isdst : 0 + #ifndef _AIX //AIX does not have these + , tm_gmtoff : 0 + , tm_zone : 0 + #endif + }; + + SQL_TIMESTAMP_STRUCT odbcTime; + + ret = SQLGetData( + hStmt, + column.index, + SQL_C_TYPE_TIMESTAMP, + &odbcTime, + bufferLength, + &len); + + DEBUG_PRINTF("ODBC::GetColumnValue - Unix Timestamp: index=%i name=%s type=%i len=%i\n", + column.index, column.name, column.type, len); + + if (len == SQL_NULL_DATA) { + return scope.Escape(Nan::Null()); + //return Null(); + } + else { + timeInfo.tm_year = odbcTime.year - 1900; + timeInfo.tm_mon = odbcTime.month - 1; + timeInfo.tm_mday = odbcTime.day; + timeInfo.tm_hour = odbcTime.hour; + timeInfo.tm_min = odbcTime.minute; + timeInfo.tm_sec = odbcTime.second; + + //a negative value means that mktime() should use timezone information + //and system databases to attempt to determine whether DST is in effect + //at the specified time. + timeInfo.tm_isdst = -1; +#ifdef TIMEGM + return scope.Escape(Nan::New((double(timegm(&timeInfo)) * 1000) + + (odbcTime.fraction / 1000000)).ToLocalChecked()); +#else +#ifdef _AIX + #define timelocal mktime +#endif + return scope.Escape(Nan::New((double(timelocal(&timeInfo)) * 1000) + + (odbcTime.fraction / 1000000)).ToLocalChecked()); +#endif + //return Date::New((double(timegm(&timeInfo)) * 1000) + // + (odbcTime.fraction / 1000000)); + } +#endif + } break; + case SQL_BIT : + //again, i'm not sure if this is cross database safe, but it works for + //MSSQL + ret = SQLGetData( + hStmt, + column.index, + SQL_C_CHAR, + (char *) buffer, + bufferLength, + &len); + + DEBUG_PRINTF("ODBC::GetColumnValue - Bit: index=%i name=%s type=%lli len=%lli\n", + column.index, column.name, column.type, len); + + if (len == SQL_NULL_DATA) { + return scope.Escape(Nan::Null()); + } + else { + return scope.Escape(Nan::New((*buffer == '0') ? 
false : true)); + } + default : + Local str; + int count = 0; + + do { + ret = SQLGetData( + hStmt, + column.index, + SQL_C_TCHAR, + (char *) buffer, + bufferLength, + &len); + + DEBUG_PRINTF("ODBC::GetColumnValue - String: index=%i name=%s type=%lli len=%lli value=%s ret=%i bufferLength=%i\n", + column.index, column.name, column.type, len,(char *) buffer, ret, bufferLength); + + if (len == SQL_NULL_DATA && str.IsEmpty()) { + return scope.Escape(Nan::Null()); + //return Null(); + } + + if (SQL_NO_DATA == ret) { + //we have captured all of the data + //double check that we have some data else return null + if (str.IsEmpty()){ + return scope.Escape(Nan::Null()); + } + + break; + } + else if (SQL_SUCCEEDED(ret)) { + //we have not captured all of the data yet + + if (count == 0) { + //no concatenation required, this is our first pass +#ifdef UNICODE + str = Nan::New((uint16_t*) buffer).ToLocalChecked(); +#else + str = Nan::New((char *) buffer).ToLocalChecked(); +#endif + } + else { + //we need to concatenate +#ifdef UNICODE + str = String::Concat(str, Nan::New((uint16_t*) buffer).ToLocalChecked()); +#else + str = String::Concat(str, Nan::New((char *) buffer).ToLocalChecked()); +#endif + } + + //if len is zero let's break out of the loop now and not attempt to + //call SQLGetData again. The specific reason for this is because + //some ODBC drivers may not correctly report SQL_NO_DATA the next + //time around causing an infinite loop here + if (len == 0) { + break; + } + + count += 1; + } + else { + //an error has occured + //possible values for ret are SQL_ERROR (-1) and SQL_INVALID_HANDLE (-2) + + //If we have an invalid handle, then stuff is way bad and we should abort + //immediately. Memory errors are bound to follow as we must be in an + //inconsisant state. 
+ assert(ret != SQL_INVALID_HANDLE); + + //Not sure if throwing here will work out well for us but we can try + //since we should have a valid handle and the error is something we + //can look into + + Local objError = ODBC::GetSQLError( + SQL_HANDLE_STMT, + hStmt, + (char *) "[node-odbc] Error in ODBC::GetColumnValue" + ); + + Nan::ThrowError(objError); + return scope.Escape(Nan::Undefined()); + break; + } + } while (true); + + return scope.Escape(str); + } +} + +/* + * GetRecordTuple + */ + +Local ODBC::GetRecordTuple ( SQLHSTMT hStmt, Column* columns, + short* colCount, uint16_t* buffer, + int bufferLength) { + Nan::EscapableHandleScope scope; + + Local tuple = Nan::New(); + + for(int i = 0; i < *colCount; i++) { +#ifdef UNICODE + tuple->Set( Nan::New((uint16_t *) columns[i].name).ToLocalChecked(), + GetColumnValue( hStmt, columns[i], buffer, bufferLength)); +#else + tuple->Set( Nan::New((const char *) columns[i].name).ToLocalChecked(), + GetColumnValue( hStmt, columns[i], buffer, bufferLength)); +#endif + } + + return scope.Escape(tuple); +} + +/* + * GetRecordArray + */ + +Local ODBC::GetRecordArray ( SQLHSTMT hStmt, Column* columns, + short* colCount, uint16_t* buffer, + int bufferLength) { + Nan::EscapableHandleScope scope; + + Local array = Nan::New(); + + for(int i = 0; i < *colCount; i++) { + array->Set( Nan::New(i), + GetColumnValue( hStmt, columns[i], buffer, bufferLength)); + } + + return scope.Escape(array); +} + +/* + * GetParametersFromArray + */ + +Parameter* ODBC::GetParametersFromArray (Local values, int *paramCount) { + DEBUG_PRINTF("ODBC::GetParametersFromArray\n"); + *paramCount = values->Length(); + + Parameter* params = NULL; + + if (*paramCount > 0) { + params = (Parameter *) malloc(*paramCount * sizeof(Parameter)); + } + + for (int i = 0; i < *paramCount; i++) { + Local value = values->Get(i); + + params[i].ColumnSize = 0; + params[i].StrLen_or_IndPtr = SQL_NULL_DATA; + params[i].BufferLength = 0; + params[i].DecimalDigits = 0; + + DEBUG_PRINTF("ODBC::GetParametersFromArray - param[%i].length = %lli\n", + i, params[i].StrLen_or_IndPtr); + + if (value->IsString()) { + Local string = value->ToString(); + + params[i].ValueType = SQL_C_TCHAR; + params[i].ColumnSize = 0; //SQL_SS_LENGTH_UNLIMITED +#ifdef UNICODE + params[i].ParameterType = SQL_WVARCHAR; + params[i].BufferLength = (string->Length() * sizeof(uint16_t)) + sizeof(uint16_t); +#else + params[i].ParameterType = SQL_VARCHAR; + params[i].BufferLength = string->Utf8Length() + 1; +#endif + params[i].ParameterValuePtr = malloc(params[i].BufferLength); + params[i].StrLen_or_IndPtr = SQL_NTS;//params[i].BufferLength; + +#ifdef UNICODE + string->Write((uint16_t *) params[i].ParameterValuePtr); +#else + string->WriteUtf8((char *) params[i].ParameterValuePtr); +#endif + + DEBUG_PRINTF("ODBC::GetParametersFromArray - IsString(): params[%i] c_type=%i type=%i buffer_length=%lli size=%lli length=%lli value=%s\n", + i, params[i].ValueType, params[i].ParameterType, + params[i].BufferLength, params[i].ColumnSize, params[i].StrLen_or_IndPtr, + (char*) params[i].ParameterValuePtr); + } + else if (value->IsNull()) { + params[i].ValueType = SQL_C_DEFAULT; + params[i].ParameterType = SQL_VARCHAR; + params[i].StrLen_or_IndPtr = SQL_NULL_DATA; + + DEBUG_PRINTF("ODBC::GetParametersFromArray - IsNull(): params[%i] c_type=%i type=%i buffer_length=%lli size=%lli length=%lli\n", + i, params[i].ValueType, params[i].ParameterType, + params[i].BufferLength, params[i].ColumnSize, params[i].StrLen_or_IndPtr); + } + else if (value->IsInt32()) 
{ + int64_t *number = new int64_t(value->IntegerValue()); + params[i].ValueType = SQL_C_SBIGINT; + params[i].ParameterType = SQL_BIGINT; + params[i].ParameterValuePtr = number; + params[i].StrLen_or_IndPtr = 0; + + DEBUG_PRINTF("ODBC::GetParametersFromArray - IsInt32(): params[%i] c_type=%i type=%i buffer_length=%lli size=%lli length=%lli value=%lld\n", + i, params[i].ValueType, params[i].ParameterType, + params[i].BufferLength, params[i].ColumnSize, params[i].StrLen_or_IndPtr, + *number); + } + else if (value->IsNumber()) { + double *number = new double(value->NumberValue()); + + params[i].ValueType = SQL_C_DOUBLE; + params[i].ParameterType = SQL_DECIMAL; + params[i].ParameterValuePtr = number; + params[i].BufferLength = sizeof(double); + params[i].StrLen_or_IndPtr = params[i].BufferLength; + params[i].DecimalDigits = 7; + params[i].ColumnSize = sizeof(double); + + DEBUG_PRINTF("ODBC::GetParametersFromArray - IsNumber(): params[%i] c_type=%i type=%i buffer_length=%lli size=%lli length=%lli value=%f\n", + i, params[i].ValueType, params[i].ParameterType, + params[i].BufferLength, params[i].ColumnSize, params[i].StrLen_or_IndPtr, + *number); + } + else if (value->IsBoolean()) { + bool *boolean = new bool(value->BooleanValue()); + params[i].ValueType = SQL_C_BIT; + params[i].ParameterType = SQL_BIT; + params[i].ParameterValuePtr = boolean; + params[i].StrLen_or_IndPtr = 0; + + DEBUG_PRINTF("ODBC::GetParametersFromArray - IsBoolean(): params[%i] c_type=%i type=%i buffer_length=%lli size=%lli length=%lli\n", + i, params[i].ValueType, params[i].ParameterType, + params[i].BufferLength, params[i].ColumnSize, params[i].StrLen_or_IndPtr); + } + } + + return params; +} + +/* + * CallbackSQLError + */ + +Handle ODBC::CallbackSQLError (SQLSMALLINT handleType, + SQLHANDLE handle, + Nan::Callback* cb) { + Nan::EscapableHandleScope scope; + + return scope.Escape(CallbackSQLError( + handleType, + handle, + (char *) "[node-odbc] SQL_ERROR", + cb)); +} + +Local ODBC::CallbackSQLError (SQLSMALLINT handleType, + SQLHANDLE handle, + char* message, + Nan::Callback* cb) { + Nan::EscapableHandleScope scope; + + Local objError = ODBC::GetSQLError( + handleType, + handle, + message + ); + + Local info[1]; + info[0] = objError; + cb->Call(1, info); + + return scope.Escape(Nan::Undefined()); +} + +/* + * GetSQLError + */ + +Local ODBC::GetSQLError (SQLSMALLINT handleType, SQLHANDLE handle) { + Nan::EscapableHandleScope scope; + + return scope.Escape(GetSQLError( + handleType, + handle, + (char *) "[node-odbc] SQL_ERROR")); +} + +Local ODBC::GetSQLError (SQLSMALLINT handleType, SQLHANDLE handle, char* message) { + Nan::EscapableHandleScope scope; + + DEBUG_PRINTF("ODBC::GetSQLError : handleType=%i, handle=%p\n", handleType, handle); + + Local objError = Nan::New(); + + int32_t i = 0; + SQLINTEGER native; + + SQLSMALLINT len; + SQLINTEGER statusRecCount; + SQLRETURN ret; + char errorSQLState[14]; + char errorMessage[ERROR_MESSAGE_BUFFER_BYTES]; + + ret = SQLGetDiagField( + handleType, + handle, + 0, + SQL_DIAG_NUMBER, + &statusRecCount, + SQL_IS_INTEGER, + &len); + + // Windows seems to define SQLINTEGER as long int, unixodbc as just int... 
%i should cover both + DEBUG_PRINTF("ODBC::GetSQLError : called SQLGetDiagField; ret=%i, statusRecCount=%i\n", ret, statusRecCount); + + Local errors = Nan::New(); + objError->Set(Nan::New("errors").ToLocalChecked(), errors); + + for (i = 0; i < statusRecCount; i++){ + DEBUG_PRINTF("ODBC::GetSQLError : calling SQLGetDiagRec; i=%i, statusRecCount=%i\n", i, statusRecCount); + + ret = SQLGetDiagRec( + handleType, + handle, + (SQLSMALLINT)(i + 1), + (SQLTCHAR *) errorSQLState, + &native, + (SQLTCHAR *) errorMessage, + ERROR_MESSAGE_BUFFER_CHARS, + &len); + + DEBUG_PRINTF("ODBC::GetSQLError : after SQLGetDiagRec; i=%i\n", i); + + if (SQL_SUCCEEDED(ret)) { + DEBUG_PRINTF("ODBC::GetSQLError : errorMessage=%s, errorSQLState=%s\n", errorMessage, errorSQLState); + + if (i == 0) { + // First error is assumed the primary error + objError->Set(Nan::New("error").ToLocalChecked(), Nan::New(message).ToLocalChecked()); +#ifdef UNICODE + Nan::SetPrototype(objError, Exception::Error(Nan::New((uint16_t *) errorMessage).ToLocalChecked())); + objError->Set(Nan::New("message").ToLocalChecked(), Nan::New((uint16_t *)errorMessage).ToLocalChecked()); + objError->Set(Nan::New("state").ToLocalChecked(), Nan::New((uint16_t *)errorSQLState).ToLocalChecked()); +#else + Nan::SetPrototype(objError, Exception::Error(Nan::New(errorMessage).ToLocalChecked())); + objError->Set(Nan::New("message").ToLocalChecked(), Nan::New(errorMessage).ToLocalChecked()); + objError->Set(Nan::New("state").ToLocalChecked(), Nan::New(errorSQLState).ToLocalChecked()); +#endif + } + + Local subError = Nan::New(); + +#ifdef UNICODE + subError->Set(Nan::New("message").ToLocalChecked(), Nan::New((uint16_t *)errorMessage).ToLocalChecked()); + subError->Set(Nan::New("state").ToLocalChecked(), Nan::New((uint16_t *)errorSQLState).ToLocalChecked()); +#else + subError->Set(Nan::New("message").ToLocalChecked(), Nan::New(errorMessage).ToLocalChecked()); + subError->Set(Nan::New("state").ToLocalChecked(), Nan::New(errorSQLState).ToLocalChecked()); +#endif + errors->Set(Nan::New(i), subError); + + } else if (ret == SQL_NO_DATA) { + break; + } + } + + if (statusRecCount == 0) { + //Create a default error object if there were no diag records + objError->Set(Nan::New("error").ToLocalChecked(), Nan::New(message).ToLocalChecked()); + Nan::SetPrototype(objError, Exception::Error(Nan::New(message).ToLocalChecked())); + objError->Set(Nan::New("message").ToLocalChecked(), Nan::New( + (const char *) "[node-odbc] An error occurred but no diagnostic information was available.").ToLocalChecked()); + } + + return scope.Escape(objError); +} + +/* + * GetAllRecordsSync + */ + +Local ODBC::GetAllRecordsSync (HENV hENV, + HDBC hDBC, + HSTMT hSTMT, + uint16_t* buffer, + int bufferLength) { + DEBUG_PRINTF("ODBC::GetAllRecordsSync\n"); + + Nan::EscapableHandleScope scope; + + Local objError = Nan::New(); + + int count = 0; + int errorCount = 0; + short colCount = 0; + + Column* columns = GetColumns(hSTMT, &colCount); + + Local rows = Nan::New(); + + //loop through all records + while (true) { + SQLRETURN ret = SQLFetch(hSTMT); + + //check to see if there was an error + if (ret == SQL_ERROR) { + //TODO: what do we do when we actually get an error here... + //should we throw?? 
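+      //As written nothing is thrown from here: the diagnostics are captured
+      //into objError, the fetch loop stops, and the rows read so far are
+      //returned to the caller.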
+ + errorCount++; + + objError = ODBC::GetSQLError( + SQL_HANDLE_STMT, + hSTMT, + (char *) "[node-odbc] Error in ODBC::GetAllRecordsSync" + ); + + break; + } + + //check to see if we are at the end of the recordset + if (ret == SQL_NO_DATA) { + ODBC::FreeColumns(columns, &colCount); + + break; + } + + rows->Set( + Nan::New(count), + ODBC::GetRecordTuple( + hSTMT, + columns, + &colCount, + buffer, + bufferLength) + ); + + count++; + } + //TODO: what do we do about errors!?! + //we throw them + return scope.Escape(rows); +} + +#ifdef dynodbc +NAN_METHOD(ODBC::LoadODBCLibrary) { + Nan::HandleScope scope; + + REQ_STR_ARG(0, js_library); + + bool result = DynLoadODBC(*js_library); + + info.GetReturnValue().Set((result) ? Nan::True() : Nan::False()); +} +#endif + +extern "C" void init(v8::Handle exports) { +#ifdef dynodbc + exports->Set(Nan::New("loadODBCLibrary").ToLocalChecked(), + Nan::New(ODBC::LoadODBCLibrary)->GetFunction()); +#endif + + ODBC::Init(exports); + ODBCResult::Init(exports); + ODBCConnection::Init(exports); + ODBCStatement::Init(exports); +} + +NODE_MODULE(odbc_bindings, init) diff --git a/src/odbc.h b/src/odbc.h new file mode 100644 index 00000000..550fd451 --- /dev/null +++ b/src/odbc.h @@ -0,0 +1,247 @@ +/* + Copyright (c) 2013, Dan VerWeire + Copyright (c) 2010, Lee Smith + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ + +#ifndef _SRC_ODBC_H +#define _SRC_ODBC_H + +#include +#include +#include +#include + +#include +#ifdef dynodbc +#include "dynodbc.h" +#else +#include +#include +#include +#include +#endif + +using namespace v8; +using namespace node; + +#define MAX_FIELD_SIZE 1024 +#define MAX_VALUE_SIZE 1048576 + +#ifdef UNICODE +#define ERROR_MESSAGE_BUFFER_BYTES 2048 +#define ERROR_MESSAGE_BUFFER_CHARS 1024 +#else +#define ERROR_MESSAGE_BUFFER_BYTES 2048 +#define ERROR_MESSAGE_BUFFER_CHARS 2048 +#endif + +#define MODE_COLLECT_AND_CALLBACK 1 +#define MODE_CALLBACK_FOR_EACH 2 +#define FETCH_ARRAY 3 +#define FETCH_OBJECT 4 +#define SQL_DESTROY 9999 + + +typedef struct { + unsigned char *name; + unsigned int len; + SQLLEN type; + SQLUSMALLINT index; +} Column; + +typedef struct { + SQLSMALLINT ValueType; + SQLSMALLINT ParameterType; + SQLLEN ColumnSize; + SQLSMALLINT DecimalDigits; + void *ParameterValuePtr; + SQLLEN BufferLength; + SQLLEN StrLen_or_IndPtr; +} Parameter; + +class ODBC : public Nan::ObjectWrap { + public: + static Nan::Persistent constructor; + static uv_mutex_t g_odbcMutex; + + static void Init(v8::Handle exports); + static Column* GetColumns(SQLHSTMT hStmt, short* colCount); + static void FreeColumns(Column* columns, short* colCount); + static Handle GetColumnValue(SQLHSTMT hStmt, Column column, uint16_t* buffer, int bufferLength); + static Local GetRecordTuple (SQLHSTMT hStmt, Column* columns, short* colCount, uint16_t* buffer, int bufferLength); + static Local GetRecordArray (SQLHSTMT hStmt, Column* columns, short* colCount, uint16_t* buffer, int bufferLength); + static Handle CallbackSQLError(SQLSMALLINT handleType, SQLHANDLE handle, Nan::Callback* cb); + static Local CallbackSQLError (SQLSMALLINT handleType, SQLHANDLE handle, char* message, Nan::Callback* cb); + static Local GetSQLError (SQLSMALLINT handleType, SQLHANDLE handle); + static Local GetSQLError (SQLSMALLINT handleType, SQLHANDLE handle, char* message); + static Local GetAllRecordsSync (HENV hENV, HDBC hDBC, HSTMT hSTMT, uint16_t* buffer, int bufferLength); +#ifdef dynodbc + static NAN_METHOD(LoadODBCLibrary); +#endif + static Parameter* GetParametersFromArray (Local values, int* paramCount); + + void Free(); + + protected: + ODBC() {} + + ~ODBC(); + + public: + static NAN_METHOD(New); + + //async methods + static NAN_METHOD(CreateConnection); + protected: + static void UV_CreateConnection(uv_work_t* work_req); + static void UV_AfterCreateConnection(uv_work_t* work_req, int status); + + static void WatcherCallback(uv_async_t* w, int revents); + + //sync methods + public: + static NAN_METHOD(CreateConnectionSync); + protected: + + ODBC *self(void) { return this; } + + HENV m_hEnv; +}; + +struct create_connection_work_data { + Nan::Callback* cb; + ODBC *dbo; + HDBC hDBC; + int result; +}; + +struct open_request { + Nan::Persistent cb; + ODBC *dbo; + int result; + char connection[1]; +}; + +struct close_request { + Nan::Persistent cb; + ODBC *dbo; + int result; +}; + +struct query_request { + Nan::Persistent cb; + ODBC *dbo; + HSTMT hSTMT; + int affectedRows; + char *sql; + char *catalog; + char *schema; + char *table; + char *type; + char *column; + Parameter *params; + int paramCount; + int result; +}; + +#ifdef UNICODE +#define SQL_T(x) (L##x) +#else +#define SQL_T(x) (x) +#endif + +#ifdef DEBUG +#define DEBUG_TPRINTF(...) fprintf(stdout, __VA_ARGS__) +#define DEBUG_PRINTF(...) fprintf(stdout, __VA_ARGS__) +#else +#define DEBUG_PRINTF(...) (void)0 +#define DEBUG_TPRINTF(...) 
(void)0 +#endif + +#define REQ_ARGS(N) \ + if (info.Length() < (N)) \ + return Nan::ThrowTypeError("Expected " #N "arguments"); + +//Require String Argument; Save String as Utf8 +#define REQ_STR_ARG(I, VAR) \ + if (info.Length() <= (I) || !info[I]->IsString()) \ + return Nan::ThrowTypeError("Argument " #I " must be a string"); \ + String::Utf8Value VAR(info[I]->ToString()); + +//Require String Argument; Save String as Wide String (UCS2) +#define REQ_WSTR_ARG(I, VAR) \ + if (info.Length() <= (I) || !info[I]->IsString()) \ + return Nan::ThrowTypeError("Argument " #I " must be a string"); \ + String::Value VAR(info[I]->ToString()); + +//Require String Argument; Save String as Object +#define REQ_STRO_ARG(I, VAR) \ + if (info.Length() <= (I) || !info[I]->IsString()) \ + return Nan::ThrowTypeError("Argument " #I " must be a string"); \ + Local VAR(info[I]->ToString()); + +//Require String or Null Argument; Save String as Utf8 +#define REQ_STR_OR_NULL_ARG(I, VAR) \ + if ( info.Length() <= (I) || (!info[I]->IsString() && !info[I]->IsNull()) ) \ + return Nan::ThrowTypeError("Argument " #I " must be a string or null"); \ + String::Utf8Value VAR(info[I]->ToString()); + +//Require String or Null Argument; Save String as Wide String (UCS2) +#define REQ_WSTR_OR_NULL_ARG(I, VAR) \ + if ( info.Length() <= (I) || (!info[I]->IsString() && !info[I]->IsNull()) ) \ + return Nan::ThrowTypeError("Argument " #I " must be a string or null"); \ + String::Value VAR(info[I]->ToString()); + +//Require String or Null Argument; save String as String Object +#define REQ_STRO_OR_NULL_ARG(I, VAR) \ + if ( info.Length() <= (I) || (!info[I]->IsString() && !info[I]->IsNull()) ) { \ + Nan::ThrowTypeError("Argument " #I " must be a string or null"); \ + return; \ + } \ + Local VAR(info[I]->ToString()); + +#define REQ_FUN_ARG(I, VAR) \ + if (info.Length() <= (I) || !info[I]->IsFunction()) \ + return Nan::ThrowTypeError("Argument " #I " must be a function"); \ + Local VAR = Local::Cast(info[I]); + +#define REQ_BOOL_ARG(I, VAR) \ + if (info.Length() <= (I) || !info[I]->IsBoolean()) \ + return Nan::ThrowTypeError("Argument " #I " must be a boolean"); \ + Local VAR = (info[I]->ToBoolean()); + +#define REQ_EXT_ARG(I, VAR) \ + if (info.Length() <= (I) || !info[I]->IsExternal()) \ + return Nan::ThrowTypeError("Argument " #I " invalid"); \ + Local VAR = Local::Cast(info[I]); + +#define OPT_INT_ARG(I, VAR, DEFAULT) \ + int VAR; \ + if (info.Length() <= (I)) { \ + VAR = (DEFAULT); \ + } else if (info[I]->IsInt32()) { \ + VAR = info[I]->Int32Value(); \ + } else { \ + return Nan::ThrowTypeError("Argument " #I " must be an integer"); \ + } + + +// From node v10 NODE_DEFINE_CONSTANT +#define NODE_ODBC_DEFINE_CONSTANT(constructor_template, constant) \ + (constructor_template)->Set(Nan::New(#constant).ToLocalChecked(), \ + Nan::New(constant), \ + static_cast(v8::ReadOnly|v8::DontDelete)) + +#endif diff --git a/src/odbc_connection.cpp b/src/odbc_connection.cpp new file mode 100644 index 00000000..3e748ee1 --- /dev/null +++ b/src/odbc_connection.cpp @@ -0,0 +1,1713 @@ +/* + Copyright (c) 2013, Dan VerWeire + Copyright (c) 2010, Lee Smith + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ + +#include +#include +#include +#include +#include +#include + +#include "odbc.h" +#include "odbc_connection.h" +#include "odbc_result.h" +#include "odbc_statement.h" + +using namespace v8; +using namespace node; + +Nan::Persistent ODBCConnection::constructor; +Nan::Persistent ODBCConnection::OPTION_SQL; +Nan::Persistent ODBCConnection::OPTION_PARAMS; +Nan::Persistent ODBCConnection::OPTION_NORESULTS; + +void ODBCConnection::Init(v8::Handle exports) { + DEBUG_PRINTF("ODBCConnection::Init\n"); + Nan::HandleScope scope; + + OPTION_SQL.Reset(Nan::New("sql").ToLocalChecked()); + OPTION_PARAMS.Reset(Nan::New("params").ToLocalChecked()); + OPTION_NORESULTS.Reset(Nan::New("noResults").ToLocalChecked()); + + Local constructor_template = Nan::New(New); + + // Constructor Template + constructor_template->SetClassName(Nan::New("ODBCConnection").ToLocalChecked()); + + // Reserve space for one Handle + Local instance_template = constructor_template->InstanceTemplate(); + instance_template->SetInternalFieldCount(1); + + // Properties + //Nan::SetAccessor(instance_template, Nan::New("mode").ToLocalChecked(), ModeGetter, ModeSetter); + Nan::SetAccessor(instance_template, Nan::New("connected").ToLocalChecked(), ConnectedGetter); + Nan::SetAccessor(instance_template, Nan::New("connectTimeout").ToLocalChecked(), ConnectTimeoutGetter, ConnectTimeoutSetter); + Nan::SetAccessor(instance_template, Nan::New("loginTimeout").ToLocalChecked(), LoginTimeoutGetter, LoginTimeoutSetter); + + // Prototype Methods + Nan::SetPrototypeMethod(constructor_template, "open", Open); + Nan::SetPrototypeMethod(constructor_template, "openSync", OpenSync); + Nan::SetPrototypeMethod(constructor_template, "close", Close); + Nan::SetPrototypeMethod(constructor_template, "closeSync", CloseSync); + Nan::SetPrototypeMethod(constructor_template, "createStatement", CreateStatement); + Nan::SetPrototypeMethod(constructor_template, "createStatementSync", CreateStatementSync); + Nan::SetPrototypeMethod(constructor_template, "query", Query); + Nan::SetPrototypeMethod(constructor_template, "querySync", QuerySync); + + Nan::SetPrototypeMethod(constructor_template, "beginTransaction", BeginTransaction); + Nan::SetPrototypeMethod(constructor_template, "beginTransactionSync", BeginTransactionSync); + Nan::SetPrototypeMethod(constructor_template, "endTransaction", EndTransaction); + Nan::SetPrototypeMethod(constructor_template, "endTransactionSync", EndTransactionSync); + + Nan::SetPrototypeMethod(constructor_template, "getInfoSync", GetInfoSync); + + Nan::SetPrototypeMethod(constructor_template, "columns", Columns); + Nan::SetPrototypeMethod(constructor_template, "tables", Tables); + + // Attach the Database Constructor to the target object + constructor.Reset(constructor_template->GetFunction()); + exports->Set( Nan::New("ODBCConnection").ToLocalChecked(), constructor_template->GetFunction()); +} + +ODBCConnection::~ODBCConnection() { + DEBUG_PRINTF("ODBCConnection::~ODBCConnection\n"); + this->Free(); +} + +void ODBCConnection::Free() { + DEBUG_PRINTF("ODBCConnection::Free\n"); + if (m_hDBC) { + uv_mutex_lock(&ODBC::g_odbcMutex); + + if (m_hDBC) { + SQLDisconnect(m_hDBC); + SQLFreeHandle(SQL_HANDLE_DBC, m_hDBC); + m_hDBC 
= NULL; + } + + uv_mutex_unlock(&ODBC::g_odbcMutex); + } +} + +/* + * New + */ + +NAN_METHOD(ODBCConnection::New) { + DEBUG_PRINTF("ODBCConnection::New\n"); + Nan::HandleScope scope; + + REQ_EXT_ARG(0, js_henv); + REQ_EXT_ARG(1, js_hdbc); + + HENV hENV = static_cast(js_henv->Value()); + HDBC hDBC = static_cast(js_hdbc->Value()); + + ODBCConnection* conn = new ODBCConnection(hENV, hDBC); + + conn->Wrap(info.Holder()); + + //set default connectTimeout to 0 seconds + conn->connectTimeout = 0; + //set default loginTimeout to 5 seconds + conn->loginTimeout = 5; + + info.GetReturnValue().Set(info.Holder()); +} + +NAN_GETTER(ODBCConnection::ConnectedGetter) { + Nan::HandleScope scope; + + ODBCConnection *obj = Nan::ObjectWrap::Unwrap(info.Holder()); + + info.GetReturnValue().Set(obj->connected ? Nan::True() : Nan::False()); +} + +NAN_GETTER(ODBCConnection::ConnectTimeoutGetter) { + Nan::HandleScope scope; + + ODBCConnection *obj = Nan::ObjectWrap::Unwrap(info.Holder()); + + info.GetReturnValue().Set(Nan::New(obj->connectTimeout)); +} + +NAN_SETTER(ODBCConnection::ConnectTimeoutSetter) { + Nan::HandleScope scope; + + ODBCConnection *obj = Nan::ObjectWrap::Unwrap(info.Holder()); + + if (value->IsNumber()) { + obj->connectTimeout = value->Uint32Value(); + } +} + +NAN_GETTER(ODBCConnection::LoginTimeoutGetter) { + Nan::HandleScope scope; + + ODBCConnection *obj = Nan::ObjectWrap::Unwrap(info.Holder()); + + info.GetReturnValue().Set(Nan::New(obj->loginTimeout)); +} + +NAN_SETTER(ODBCConnection::LoginTimeoutSetter) { + Nan::HandleScope scope; + + ODBCConnection *obj = Nan::ObjectWrap::Unwrap(info.Holder()); + + if (value->IsNumber()) { + obj->loginTimeout = value->Uint32Value(); + } +} + +/* + * Open + * + */ + +//Handle ODBCConnection::Open(const Arguments& info) { +NAN_METHOD(ODBCConnection::Open) { + DEBUG_PRINTF("ODBCConnection::Open\n"); + Nan::HandleScope scope; + + REQ_STRO_ARG(0, connection); + REQ_FUN_ARG(1, cb); + + //get reference to the connection object + ODBCConnection* conn = Nan::ObjectWrap::Unwrap(info.Holder()); + + //create a uv work request + uv_work_t* work_req = (uv_work_t *) (calloc(1, sizeof(uv_work_t))); + + //allocate our worker data + open_connection_work_data* data = (open_connection_work_data *) + calloc(1, sizeof(open_connection_work_data)); + + //copy the connection string to the work data +#ifdef UNICODE + data->connectionLength = connection->Length() + 1; + data->connection = (uint16_t *) malloc(sizeof(uint16_t) * data->connectionLength); + connection->Write((uint16_t*) data->connection); +#else + data->connectionLength = connection->Utf8Length() + 1; + data->connection = (char *) malloc(sizeof(char) * data->connectionLength); + connection->WriteUtf8((char*) data->connection); +#endif + + data->cb = new Nan::Callback(cb); + data->conn = conn; + + work_req->data = data; + + //queue the work + uv_queue_work(uv_default_loop(), + work_req, + UV_Open, + (uv_after_work_cb)UV_AfterOpen); + + conn->Ref(); + + info.GetReturnValue().Set(info.Holder()); +} + +void ODBCConnection::UV_Open(uv_work_t* req) { + DEBUG_PRINTF("ODBCConnection::UV_Open\n"); + open_connection_work_data* data = (open_connection_work_data *)(req->data); + + ODBCConnection* self = data->conn->self(); + + DEBUG_PRINTF("ODBCConnection::UV_Open : connectTimeout=%i, loginTimeout = %i\n", *&(self->connectTimeout), *&(self->loginTimeout)); + + uv_mutex_lock(&ODBC::g_odbcMutex); + + if (self->connectTimeout > 0) { + //NOTE: SQLSetConnectAttr requires the thread to be locked + SQLSetConnectAttr( + self->m_hDBC, 
//ConnectionHandle + SQL_ATTR_CONNECTION_TIMEOUT, //Attribute + (SQLPOINTER) size_t(self->connectTimeout), //ValuePtr + SQL_IS_UINTEGER); //StringLength + } + + if (self->loginTimeout > 0) { + //NOTE: SQLSetConnectAttr requires the thread to be locked + SQLSetConnectAttr( + self->m_hDBC, //ConnectionHandle + SQL_ATTR_LOGIN_TIMEOUT, //Attribute + (SQLPOINTER) size_t(self->loginTimeout), //ValuePtr + SQL_IS_UINTEGER); //StringLength + } + + //Attempt to connect + //NOTE: SQLDriverConnect requires the thread to be locked + int ret = SQLDriverConnect( + self->m_hDBC, //ConnectionHandle + NULL, //WindowHandle + (SQLTCHAR*) data->connection, //InConnectionString + data->connectionLength, //StringLength1 + NULL, //OutConnectionString + 0, //BufferLength - in characters + NULL, //StringLength2Ptr + SQL_DRIVER_NOPROMPT); //DriverCompletion + + if (SQL_SUCCEEDED(ret)) { + HSTMT hStmt; + + //allocate a temporary statment + ret = SQLAllocHandle(SQL_HANDLE_STMT, self->m_hDBC, &hStmt); + + //try to determine if the driver can handle + //multiple recordsets + ret = SQLGetFunctions( + self->m_hDBC, + SQL_API_SQLMORERESULTS, + &(self->canHaveMoreResults)); + + if (!SQL_SUCCEEDED(ret)) { + self->canHaveMoreResults = 0; + } + + //free the handle + ret = SQLFreeHandle( SQL_HANDLE_STMT, hStmt); + } + + uv_mutex_unlock(&ODBC::g_odbcMutex); + + data->result = ret; +} + +void ODBCConnection::UV_AfterOpen(uv_work_t* req, int status) { + DEBUG_PRINTF("ODBCConnection::UV_AfterOpen\n"); + Nan::HandleScope scope; + + open_connection_work_data* data = (open_connection_work_data *)(req->data); + + Local argv[1]; + + bool err = false; + + if (data->result) { + err = true; + + Local objError = ODBC::GetSQLError(SQL_HANDLE_DBC, data->conn->self()->m_hDBC); + + argv[0] = objError; + } + + if (!err) { + data->conn->self()->connected = true; + } + + Nan::TryCatch try_catch; + + data->conn->Unref(); + data->cb->Call(data->conn->handle(), err ? 
1 : 0, argv); + + if (try_catch.HasCaught()) { + Nan::FatalException(try_catch); + } + + delete data->cb; + + free(data->connection); + free(data); + free(req); +} + +/* + * OpenSync + */ + +NAN_METHOD(ODBCConnection::OpenSync) { + DEBUG_PRINTF("ODBCConnection::OpenSync\n"); + Nan::HandleScope scope; + + REQ_STRO_ARG(0, connection); + + //get reference to the connection object + ODBCConnection* conn = Nan::ObjectWrap::Unwrap(info.Holder()); + + DEBUG_PRINTF("ODBCConnection::OpenSync : connectTimeout=%i, loginTimeout = %i\n", *&(conn->connectTimeout), *&(conn->loginTimeout)); + + Local objError; + SQLRETURN ret; + bool err = false; + +#ifdef UNICODE + int connectionLength = connection->Length() + 1; + uint16_t* connectionString = (uint16_t *) malloc(connectionLength * sizeof(uint16_t)); + connection->Write(connectionString); +#else + int connectionLength = connection->Utf8Length() + 1; + char* connectionString = (char *) malloc(connectionLength); + connection->WriteUtf8(connectionString); +#endif + + uv_mutex_lock(&ODBC::g_odbcMutex); + + if (conn->connectTimeout > 0) { + //NOTE: SQLSetConnectAttr requires the thread to be locked + SQLSetConnectAttr( + conn->m_hDBC, //ConnectionHandle + SQL_ATTR_CONNECTION_TIMEOUT, //Attribute + (SQLPOINTER) size_t(conn->connectTimeout), //ValuePtr + SQL_IS_UINTEGER); //StringLength + } + + if (conn->loginTimeout > 0) { + //NOTE: SQLSetConnectAttr requires the thread to be locked + SQLSetConnectAttr( + conn->m_hDBC, //ConnectionHandle + SQL_ATTR_LOGIN_TIMEOUT, //Attribute + (SQLPOINTER) size_t(conn->loginTimeout), //ValuePtr + SQL_IS_UINTEGER); //StringLength + } + + //Attempt to connect + //NOTE: SQLDriverConnect requires the thread to be locked + ret = SQLDriverConnect( + conn->m_hDBC, //ConnectionHandle + NULL, //WindowHandle + (SQLTCHAR*) connectionString, //InConnectionString + connectionLength, //StringLength1 + NULL, //OutConnectionString + 0, //BufferLength - in characters + NULL, //StringLength2Ptr + SQL_DRIVER_NOPROMPT); //DriverCompletion + + if (!SQL_SUCCEEDED(ret)) { + err = true; + + objError = ODBC::GetSQLError(SQL_HANDLE_DBC, conn->self()->m_hDBC); + } + else { + HSTMT hStmt; + + //allocate a temporary statment + ret = SQLAllocHandle(SQL_HANDLE_STMT, conn->m_hDBC, &hStmt); + + //try to determine if the driver can handle + //multiple recordsets + ret = SQLGetFunctions( + conn->m_hDBC, + SQL_API_SQLMORERESULTS, + &(conn->canHaveMoreResults)); + + if (!SQL_SUCCEEDED(ret)) { + conn->canHaveMoreResults = 0; + } + + //free the handle + ret = SQLFreeHandle( SQL_HANDLE_STMT, hStmt); + + conn->self()->connected = true; + } + + uv_mutex_unlock(&ODBC::g_odbcMutex); + + free(connectionString); + + if (err) { + return Nan::ThrowError(objError); + } + else { + info.GetReturnValue().Set(Nan::True()); + } +} + +/* + * Close + * + */ + +NAN_METHOD(ODBCConnection::Close) { + DEBUG_PRINTF("ODBCConnection::Close\n"); + Nan::HandleScope scope; + + REQ_FUN_ARG(0, cb); + + ODBCConnection* conn = Nan::ObjectWrap::Unwrap(info.Holder()); + + uv_work_t* work_req = (uv_work_t *) (calloc(1, sizeof(uv_work_t))); + + close_connection_work_data* data = (close_connection_work_data *) + (calloc(1, sizeof(close_connection_work_data))); + + data->cb = new Nan::Callback(cb); + data->conn = conn; + + work_req->data = data; + + uv_queue_work( + uv_default_loop(), + work_req, + UV_Close, + (uv_after_work_cb)UV_AfterClose); + + conn->Ref(); + + info.GetReturnValue().Set(Nan::Undefined()); +} + +void ODBCConnection::UV_Close(uv_work_t* req) { + 
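+  //Runs on the libuv worker thread: Free() disconnects and releases the
+  //connection handle while holding the global ODBC mutex.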
DEBUG_PRINTF("ODBCConnection::UV_Close\n"); + close_connection_work_data* data = (close_connection_work_data *)(req->data); + ODBCConnection* conn = data->conn; + + //TODO: check to see if there are any open statements + //on this connection + + conn->Free(); + + data->result = 0; +} + +void ODBCConnection::UV_AfterClose(uv_work_t* req, int status) { + DEBUG_PRINTF("ODBCConnection::UV_AfterClose\n"); + Nan::HandleScope scope; + + close_connection_work_data* data = (close_connection_work_data *)(req->data); + + ODBCConnection* conn = data->conn; + + Local argv[1]; + bool err = false; + + if (data->result) { + err = true; + argv[0] = Exception::Error(Nan::New("Error closing database").ToLocalChecked()); + } + else { + conn->connected = false; + } + + Nan::TryCatch try_catch; + + data->conn->Unref(); + data->cb->Call(err ? 1 : 0, argv); + + if (try_catch.HasCaught()) { + Nan::FatalException(try_catch); + } + + delete data->cb; + + free(data); + free(req); +} + +/* + * CloseSync + */ + +NAN_METHOD(ODBCConnection::CloseSync) { + DEBUG_PRINTF("ODBCConnection::CloseSync\n"); + Nan::HandleScope scope; + + ODBCConnection* conn = Nan::ObjectWrap::Unwrap(info.Holder()); + + //TODO: check to see if there are any open statements + //on this connection + + conn->Free(); + + conn->connected = false; + + info.GetReturnValue().Set(Nan::True()); +} + +/* + * CreateStatementSync + * + */ + +NAN_METHOD(ODBCConnection::CreateStatementSync) { + DEBUG_PRINTF("ODBCConnection::CreateStatementSync\n"); + Nan::HandleScope scope; + + ODBCConnection* conn = Nan::ObjectWrap::Unwrap(info.Holder()); + + HSTMT hSTMT; + + uv_mutex_lock(&ODBC::g_odbcMutex); + + SQLAllocHandle( + SQL_HANDLE_STMT, + conn->m_hDBC, + &hSTMT); + + uv_mutex_unlock(&ODBC::g_odbcMutex); + + Local params[3]; + params[0] = Nan::New(conn->m_hENV); + params[1] = Nan::New(conn->m_hDBC); + params[2] = Nan::New(hSTMT); + + Local js_result(Nan::NewInstance(Nan::New(ODBCStatement::constructor), 3, params).ToLocalChecked()); + + info.GetReturnValue().Set(js_result); +} + +/* + * CreateStatement + * + */ + +NAN_METHOD(ODBCConnection::CreateStatement) { + DEBUG_PRINTF("ODBCConnection::CreateStatement\n"); + Nan::HandleScope scope; + + REQ_FUN_ARG(0, cb); + + ODBCConnection* conn = Nan::ObjectWrap::Unwrap(info.Holder()); + + //initialize work request + uv_work_t* work_req = (uv_work_t *) (calloc(1, sizeof(uv_work_t))); + + //initialize our data + create_statement_work_data* data = + (create_statement_work_data *) (calloc(1, sizeof(create_statement_work_data))); + + data->cb = new Nan::Callback(cb); + data->conn = conn; + + work_req->data = data; + + uv_queue_work( + uv_default_loop(), + work_req, + UV_CreateStatement, + (uv_after_work_cb)UV_AfterCreateStatement); + + conn->Ref(); + + info.GetReturnValue().Set(Nan::Undefined()); +} + +void ODBCConnection::UV_CreateStatement(uv_work_t* req) { + DEBUG_PRINTF("ODBCConnection::UV_CreateStatement\n"); + + //get our work data + create_statement_work_data* data = (create_statement_work_data *)(req->data); + + DEBUG_PRINTF("ODBCConnection::UV_CreateStatement\n"); + //DEBUG_PRINTF("ODBCConnection::UV_CreateStatement m_hDBC=%X m_hDBC=%X m_hSTMT=%X\n", + // data->conn->m_hENV, + // data->conn->m_hDBC, + // data->hSTMT + //); + + uv_mutex_lock(&ODBC::g_odbcMutex); + + //allocate a new statment handle + SQLAllocHandle( SQL_HANDLE_STMT, + data->conn->m_hDBC, + &data->hSTMT); + + uv_mutex_unlock(&ODBC::g_odbcMutex); + + DEBUG_PRINTF("ODBCConnection::UV_CreateStatement\n"); + //DEBUG_PRINTF("ODBCConnection::UV_CreateStatement 
m_hDBC=%X m_hDBC=%X m_hSTMT=%X\n", + // data->conn->m_hENV, + // data->conn->m_hDBC, + // data->hSTMT + //); +} + +void ODBCConnection::UV_AfterCreateStatement(uv_work_t* req, int status) { + DEBUG_PRINTF("ODBCConnection::UV_AfterCreateStatement\n"); + Nan::HandleScope scope; + + create_statement_work_data* data = (create_statement_work_data *)(req->data); + + DEBUG_PRINTF("ODBCConnection::UV_AfterCreateStatement\n"); + //DEBUG_PRINTF("ODBCConnection::UV_AfterCreateStatement m_hDBC=%X m_hDBC=%X hSTMT=%X\n", + // data->conn->m_hENV, + // data->conn->m_hDBC, + // data->hSTMT + //); + + Local info[3]; + info[0] = Nan::New(data->conn->m_hENV); + info[1] = Nan::New(data->conn->m_hDBC); + info[2] = Nan::New(data->hSTMT); + + Local js_result = Nan::NewInstance(Nan::New(ODBCStatement::constructor), 3, info).ToLocalChecked(); + + info[0] = Nan::Null(); + info[1] = js_result; + + Nan::TryCatch try_catch; + + data->cb->Call( 2, info); + + if (try_catch.HasCaught()) { + Nan::FatalException(try_catch); + } + + data->conn->Unref(); + delete data->cb; + + free(data); + free(req); +} + +/* + * Query + */ + +NAN_METHOD(ODBCConnection::Query) { + DEBUG_PRINTF("ODBCConnection::Query\n"); + Nan::HandleScope scope; + + Local cb; + + Local sql; + + ODBCConnection* conn = Nan::ObjectWrap::Unwrap(info.Holder()); + + uv_work_t* work_req = (uv_work_t *) (calloc(1, sizeof(uv_work_t))); + + query_work_data* data = (query_work_data *) calloc(1, sizeof(query_work_data)); + + //Check arguments for different variations of calling this function + if (info.Length() == 3) { + //handle Query("sql string", [params], function cb () {}); + + if ( !info[0]->IsString() ) { + return Nan::ThrowTypeError("Argument 0 must be an String."); + } + else if ( !info[1]->IsArray() ) { + return Nan::ThrowTypeError("Argument 1 must be an Array."); + } + else if ( !info[2]->IsFunction() ) { + return Nan::ThrowTypeError("Argument 2 must be a Function."); + } + + sql = info[0]->ToString(); + + data->params = ODBC::GetParametersFromArray( + Local::Cast(info[1]), + &data->paramCount); + + cb = Local::Cast(info[2]); + } + else if (info.Length() == 2 ) { + //handle either Query("sql", cb) or Query({ settings }, cb) + + if (!info[1]->IsFunction()) { + return Nan::ThrowTypeError("ODBCConnection::Query(): Argument 1 must be a Function."); + } + + cb = Local::Cast(info[1]); + + if (info[0]->IsString()) { + //handle Query("sql", function cb () {}) + + sql = info[0]->ToString(); + + data->paramCount = 0; + } + else if (info[0]->IsObject()) { + //NOTE: going forward this is the way we should expand options + //rather than adding more arguments to the function signature. + //specify options on an options object. 
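+      //The keys recognized here are "sql", "params" and "noResults"
+      //(OPTION_SQL, OPTION_PARAMS and OPTION_NORESULTS set up in Init).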
+ //handle Query({}, function cb () {}); + + Local obj = info[0]->ToObject(); + + Local optionSqlKey = Nan::New(OPTION_SQL); + if (obj->Has(optionSqlKey) && obj->Get(optionSqlKey)->IsString()) { + sql = obj->Get(optionSqlKey)->ToString(); + } + else { + sql = Nan::New("").ToLocalChecked(); + } + + Local optionParamsKey = Nan::New(OPTION_PARAMS); + if (obj->Has(optionParamsKey) && obj->Get(optionParamsKey)->IsArray()) { + data->params = ODBC::GetParametersFromArray( + Local::Cast(obj->Get(optionParamsKey)), + &data->paramCount); + } + else { + data->paramCount = 0; + } + + Local optionNoResultsKey = Nan::New(OPTION_NORESULTS); + if (obj->Has(optionNoResultsKey) && obj->Get(optionNoResultsKey)->IsBoolean()) { + data->noResultObject = obj->Get(optionNoResultsKey)->ToBoolean()->Value(); + } + else { + data->noResultObject = false; + } + } + else { + return Nan::ThrowTypeError("ODBCConnection::Query(): Argument 0 must be a String or an Object."); + } + } + else { + return Nan::ThrowTypeError("ODBCConnection::Query(): Requires either 2 or 3 Arguments. "); + } + //Done checking arguments + + data->cb = new Nan::Callback(cb); + +#ifdef UNICODE + data->sqlLen = sql->Length(); + data->sqlSize = (data->sqlLen * sizeof(uint16_t)) + sizeof(uint16_t); + data->sql = (uint16_t *) malloc(data->sqlSize); + sql->Write((uint16_t *) data->sql); +#else + data->sqlLen = sql->Utf8Length(); + data->sqlSize = data->sqlLen + 1; + data->sql = (char *) malloc(data->sqlSize); + sql->WriteUtf8((char *) data->sql); +#endif + + DEBUG_PRINTF("ODBCConnection::Query : sqlLen=%i, sqlSize=%i, sql=%s\n", + data->sqlLen, data->sqlSize, (char*) data->sql); + + data->conn = conn; + work_req->data = data; + + uv_queue_work( + uv_default_loop(), + work_req, + UV_Query, + (uv_after_work_cb)UV_AfterQuery); + + conn->Ref(); + + info.GetReturnValue().Set(Nan::Undefined()); +} + +void ODBCConnection::UV_Query(uv_work_t* req) { + DEBUG_PRINTF("ODBCConnection::UV_Query\n"); + + query_work_data* data = (query_work_data *)(req->data); + + Parameter prm; + SQLRETURN ret; + + uv_mutex_lock(&ODBC::g_odbcMutex); + + //allocate a new statment handle + SQLAllocHandle( SQL_HANDLE_STMT, + data->conn->m_hDBC, + &data->hSTMT ); + + uv_mutex_unlock(&ODBC::g_odbcMutex); + + // SQLExecDirect will use bound parameters, but without the overhead of SQLPrepare + // for a single execution. 
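+  //Each parameter built by GetParametersFromArray is bound to its positional
+  //'?' marker with SQLBindParameter before SQLExecDirect runs.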
+ if (data->paramCount) { + for (int i = 0; i < data->paramCount; i++) { + prm = data->params[i]; + + + /*DEBUG_TPRINTF( + SQL_T("ODBCConnection::UV_Query - param[%i]: ValueType=%i type=%i BufferLength=%i size=%i length=%i &length=%X\n"), i, prm.ValueType, prm.ParameterType, + prm.BufferLength, prm.ColumnSize, prm.length, &data->params[i].length);*/ + + ret = SQLBindParameter( + data->hSTMT, //StatementHandle + i + 1, //ParameterNumber + SQL_PARAM_INPUT, //InputOutputType + prm.ValueType, + prm.ParameterType, + prm.ColumnSize, + prm.DecimalDigits, + prm.ParameterValuePtr, + prm.BufferLength, + &data->params[i].StrLen_or_IndPtr); + + if (ret == SQL_ERROR) { + data->result = ret; + return; + } + } + } + + // execute the query directly + ret = SQLExecDirect( + data->hSTMT, + (SQLTCHAR *)data->sql, + data->sqlLen); + + // this will be checked later in UV_AfterQuery + data->result = ret; +} + +void ODBCConnection::UV_AfterQuery(uv_work_t* req, int status) { + DEBUG_PRINTF("ODBCConnection::UV_AfterQuery\n"); + + Nan::HandleScope scope; + + query_work_data* data = (query_work_data *)(req->data); + + Nan::TryCatch try_catch; + + DEBUG_PRINTF("ODBCConnection::UV_AfterQuery : data->result=%i, data->noResultObject=%i\n", data->result, data->noResultObject); + + if (data->result != SQL_ERROR && data->noResultObject) { + //We have been requested to not create a result object + //this means we should release the handle now and call back + //with Nan::True() + + uv_mutex_lock(&ODBC::g_odbcMutex); + + SQLFreeHandle(SQL_HANDLE_STMT, data->hSTMT); + + uv_mutex_unlock(&ODBC::g_odbcMutex); + + Local info[2]; + info[0] = Nan::Null(); + info[1] = Nan::True(); + + data->cb->Call(2, info); + } + else { + Local info[4]; + bool* canFreeHandle = new bool(true); + + info[0] = Nan::New(data->conn->m_hENV); + info[1] = Nan::New(data->conn->m_hDBC); + info[2] = Nan::New(data->hSTMT); + info[3] = Nan::New(canFreeHandle); + + Local js_result = Nan::NewInstance(Nan::New(ODBCResult::constructor), 4, info).ToLocalChecked(); + + // Check now to see if there was an error (as there may be further result sets) + if (data->result == SQL_ERROR) { + info[0] = ODBC::GetSQLError(SQL_HANDLE_STMT, data->hSTMT, (char *) "[node-odbc] SQL_ERROR"); + } else { + info[0] = Nan::Null(); + } + info[1] = js_result; + + data->cb->Call(2, info); + } + + data->conn->Unref(); + + if (try_catch.HasCaught()) { + Nan::FatalException(try_catch); + } + + delete data->cb; + + if (data->paramCount) { + Parameter prm; + // free parameters + for (int i = 0; i < data->paramCount; i++) { + if (prm = data->params[i], prm.ParameterValuePtr != NULL) { + switch (prm.ValueType) { + case SQL_C_WCHAR: free(prm.ParameterValuePtr); break; + case SQL_C_CHAR: free(prm.ParameterValuePtr); break; + case SQL_C_LONG: delete (int64_t *)prm.ParameterValuePtr; break; + case SQL_C_DOUBLE: delete (double *)prm.ParameterValuePtr; break; + case SQL_C_BIT: delete (bool *)prm.ParameterValuePtr; break; + } + } + } + + free(data->params); + } + + free(data->sql); + free(data->catalog); + free(data->schema); + free(data->table); + free(data->type); + free(data->column); + free(data); + free(req); +} + + +/* + * QuerySync + */ + +NAN_METHOD(ODBCConnection::QuerySync) { + DEBUG_PRINTF("ODBCConnection::QuerySync\n"); + Nan::HandleScope scope; + +#ifdef UNICODE + String::Value* sql; +#else + String::Utf8Value* sql; +#endif + + ODBCConnection* conn = Nan::ObjectWrap::Unwrap(info.Holder()); + + Parameter* params = new Parameter[0]; + Parameter prm; + SQLRETURN ret; + HSTMT hSTMT; + int 
paramCount = 0; + bool noResultObject = false; + + //Check arguments for different variations of calling this function + if (info.Length() == 2) { + //handle QuerySync("sql string", [params]); + + if ( !info[0]->IsString() ) { + return Nan::ThrowTypeError("ODBCConnection::QuerySync(): Argument 0 must be an String."); + } + else if (!info[1]->IsArray()) { + return Nan::ThrowTypeError("ODBCConnection::QuerySync(): Argument 1 must be an Array."); + } + +#ifdef UNICODE + sql = new String::Value(info[0]->ToString()); +#else + sql = new String::Utf8Value(info[0]->ToString()); +#endif + + params = ODBC::GetParametersFromArray( + Local::Cast(info[1]), + ¶mCount); + + } + else if (info.Length() == 1 ) { + //handle either QuerySync("sql") or QuerySync({ settings }) + + if (info[0]->IsString()) { + //handle Query("sql") +#ifdef UNICODE + sql = new String::Value(info[0]->ToString()); +#else + sql = new String::Utf8Value(info[0]->ToString()); +#endif + + paramCount = 0; + } + else if (info[0]->IsObject()) { + //NOTE: going forward this is the way we should expand options + //rather than adding more arguments to the function signature. + //specify options on an options object. + //handle Query({}, function cb () {}); + + Local obj = info[0]->ToObject(); + + Local optionSqlKey = Nan::New(OPTION_SQL); + if (obj->Has(optionSqlKey) && obj->Get(optionSqlKey)->IsString()) { +#ifdef UNICODE + sql = new String::Value(obj->Get(optionSqlKey)->ToString()); +#else + sql = new String::Utf8Value(obj->Get(optionSqlKey)->ToString()); +#endif + } + else { +#ifdef UNICODE + sql = new String::Value(Nan::New("").ToLocalChecked()); +#else + sql = new String::Utf8Value(Nan::New("").ToLocalChecked()); +#endif + } + + Local optionParamsKey = Nan::New(OPTION_PARAMS); + if (obj->Has(optionParamsKey) && obj->Get(optionParamsKey)->IsArray()) { + params = ODBC::GetParametersFromArray( + Local::Cast(obj->Get(optionParamsKey)), + ¶mCount); + } + else { + paramCount = 0; + } + + Local optionNoResultsKey = Nan::New(OPTION_NORESULTS); + if (obj->Has(optionNoResultsKey) && obj->Get(optionNoResultsKey)->IsBoolean()) { + noResultObject = obj->Get(optionNoResultsKey)->ToBoolean()->Value(); + } + } + else { + return Nan::ThrowTypeError("ODBCConnection::QuerySync(): Argument 0 must be a String or an Object."); + } + } + else { + return Nan::ThrowTypeError("ODBCConnection::QuerySync(): Requires either 1 or 2 Arguments."); + } + //Done checking arguments + + uv_mutex_lock(&ODBC::g_odbcMutex); + + //allocate a new statment handle + ret = SQLAllocHandle( SQL_HANDLE_STMT, + conn->m_hDBC, + &hSTMT ); + + uv_mutex_unlock(&ODBC::g_odbcMutex); + + DEBUG_PRINTF("ODBCConnection::QuerySync - hSTMT=%p\n", hSTMT); + + if (SQL_SUCCEEDED(ret)) { + if (paramCount) { + for (int i = 0; i < paramCount; i++) { + prm = params[i]; + + DEBUG_PRINTF( + "ODBCConnection::UV_Query - param[%i]: ValueType=%i type=%i BufferLength=%lli size=%lli length=%lli &length=%lli\n", i, prm.ValueType, prm.ParameterType, + prm.BufferLength, prm.ColumnSize, prm.StrLen_or_IndPtr, params[i].StrLen_or_IndPtr); + + ret = SQLBindParameter( + hSTMT, //StatementHandle + i + 1, //ParameterNumber + SQL_PARAM_INPUT, //InputOutputType + prm.ValueType, + prm.ParameterType, + prm.ColumnSize, + prm.DecimalDigits, + prm.ParameterValuePtr, + prm.BufferLength, + ¶ms[i].StrLen_or_IndPtr); + + if (ret == SQL_ERROR) {break;} + } + } + + if (SQL_SUCCEEDED(ret)) { + ret = SQLExecDirect( + hSTMT, + (SQLTCHAR *) **sql, + sql->length()); + } + + // free parameters + for (int i = 0; i < paramCount; i++) { + if 
(prm = params[i], prm.ParameterValuePtr != NULL) { + switch (prm.ValueType) { + case SQL_C_WCHAR: free(prm.ParameterValuePtr); break; + case SQL_C_CHAR: free(prm.ParameterValuePtr); break; + case SQL_C_LONG: delete (int64_t *)prm.ParameterValuePtr; break; + case SQL_C_DOUBLE: delete (double *)prm.ParameterValuePtr; break; + case SQL_C_BIT: delete (bool *)prm.ParameterValuePtr; break; + } + } + } + + free(params); + } + + delete sql; + + //check to see if there was an error during execution + if (ret == SQL_ERROR) { + Local objError = ODBC::GetSQLError( + SQL_HANDLE_STMT, + hSTMT, + (char *) "[node-odbc] Error in ODBCConnection::QuerySync" + ); + + Nan::ThrowError(objError); + + return; + } + else if (noResultObject) { + //if there is not result object requested then + //we must destroy the STMT ourselves. + uv_mutex_lock(&ODBC::g_odbcMutex); + + SQLFreeHandle(SQL_HANDLE_STMT, hSTMT); + + uv_mutex_unlock(&ODBC::g_odbcMutex); + + info.GetReturnValue().Set(Nan::True()); + } + else { + Local result[4]; + bool* canFreeHandle = new bool(true); + + result[0] = Nan::New(conn->m_hENV); + result[1] = Nan::New(conn->m_hDBC); + result[2] = Nan::New(hSTMT); + result[3] = Nan::New(canFreeHandle); + + Local js_result = Nan::NewInstance(Nan::New(ODBCResult::constructor), 4, result).ToLocalChecked(); + + info.GetReturnValue().Set(js_result); + } +} + + +/* + * GetInfoSync + */ + +NAN_METHOD(ODBCConnection::GetInfoSync) { + DEBUG_PRINTF("ODBCConnection::GetInfoSync\n"); + Nan::HandleScope scope; + + ODBCConnection* conn = Nan::ObjectWrap::Unwrap(info.Holder()); + + if (info.Length() == 1) { + if ( !info[0]->IsNumber() ) { + return Nan::ThrowTypeError("ODBCConnection::GetInfoSync(): Argument 0 must be a Number."); + } + } + else { + return Nan::ThrowTypeError("ODBCConnection::GetInfoSync(): Requires 1 Argument."); + } + + SQLUSMALLINT InfoType = info[0]->NumberValue(); + + switch (InfoType) { + case SQL_USER_NAME: + SQLRETURN ret; + SQLTCHAR userName[255]; + SQLSMALLINT userNameLength; + + ret = SQLGetInfo(conn->m_hDBC, SQL_USER_NAME, userName, sizeof(userName), &userNameLength); + + if (SQL_SUCCEEDED(ret)) { +#ifdef UNICODE + info.GetReturnValue().Set(Nan::New((uint16_t *)userName).ToLocalChecked()); +#else + info.GetReturnValue().Set(Nan::New((const char *) userName).ToLocalChecked()); +#endif + } + break; + + default: + return Nan::ThrowTypeError("ODBCConnection::GetInfoSync(): The only supported Argument is SQL_USER_NAME."); + } +} + + +/* + * Tables + */ + +NAN_METHOD(ODBCConnection::Tables) { + Nan::HandleScope scope; + + REQ_STRO_OR_NULL_ARG(0, catalog); + REQ_STRO_OR_NULL_ARG(1, schema); + REQ_STRO_OR_NULL_ARG(2, table); + REQ_STRO_OR_NULL_ARG(3, type); + Local cb = Local::Cast(info[4]); + + ODBCConnection* conn = Nan::ObjectWrap::Unwrap(info.Holder()); + + uv_work_t* work_req = (uv_work_t *) (calloc(1, sizeof(uv_work_t))); + + query_work_data* data = + (query_work_data *) calloc(1, sizeof(query_work_data)); + + if (!data) { + Nan::LowMemoryNotification(); + Nan::ThrowError("Could not allocate enough memory"); + return; + } + + data->sql = NULL; + data->catalog = NULL; + data->schema = NULL; + data->table = NULL; + data->type = NULL; + data->column = NULL; + data->cb = new Nan::Callback(cb); + + if (!catalog->Equals(Nan::New("null").ToLocalChecked())) { +#ifdef UNICODE + data->catalog = (uint16_t *) malloc((catalog->Length() * sizeof(uint16_t)) + sizeof(uint16_t)); + catalog->Write((uint16_t *) data->catalog); +#else + data->catalog = (char *) malloc(catalog->Utf8Length() + 1); + 
catalog->WriteUtf8((char *) data->catalog); +#endif + } + + if (!schema->Equals(Nan::New("null").ToLocalChecked())) { +#ifdef UNICODE + data->schema = (uint16_t *) malloc((schema->Length() * sizeof(uint16_t)) + sizeof(uint16_t)); + schema->Write((uint16_t *) data->schema); +#else + data->schema = (char *) malloc(schema->Utf8Length() + 1); + schema->WriteUtf8((char *) data->schema); +#endif + } + + if (!table->Equals(Nan::New("null").ToLocalChecked())) { +#ifdef UNICODE + data->table = (uint16_t *) malloc((table->Length() * sizeof(uint16_t)) + sizeof(uint16_t)); + table->Write((uint16_t *) data->table); +#else + data->table = (char *) malloc(table->Utf8Length() + 1); + table->WriteUtf8((char *) data->table); +#endif + } + + if (!type->Equals(Nan::New("null").ToLocalChecked())) { +#ifdef UNICODE + data->type = (uint16_t *) malloc((type->Length() * sizeof(uint16_t)) + sizeof(uint16_t)); + type->Write((uint16_t *) data->type); +#else + data->type = (char *) malloc(type->Utf8Length() + 1); + type->WriteUtf8((char *) data->type); +#endif + } + + data->conn = conn; + work_req->data = data; + + uv_queue_work( + uv_default_loop(), + work_req, + UV_Tables, + (uv_after_work_cb) UV_AfterQuery); + + conn->Ref(); + + info.GetReturnValue().Set(Nan::Undefined()); +} + +void ODBCConnection::UV_Tables(uv_work_t* req) { + query_work_data* data = (query_work_data *)(req->data); + + uv_mutex_lock(&ODBC::g_odbcMutex); + + SQLAllocHandle(SQL_HANDLE_STMT, data->conn->m_hDBC, &data->hSTMT ); + + uv_mutex_unlock(&ODBC::g_odbcMutex); + + SQLRETURN ret = SQLTables( + data->hSTMT, + (SQLTCHAR *) data->catalog, SQL_NTS, + (SQLTCHAR *) data->schema, SQL_NTS, + (SQLTCHAR *) data->table, SQL_NTS, + (SQLTCHAR *) data->type, SQL_NTS + ); + + // this will be checked later in UV_AfterQuery + data->result = ret; +} + + + +/* + * Columns + */ + +NAN_METHOD(ODBCConnection::Columns) { + Nan::HandleScope scope; + + REQ_STRO_OR_NULL_ARG(0, catalog); + REQ_STRO_OR_NULL_ARG(1, schema); + REQ_STRO_OR_NULL_ARG(2, table); + REQ_STRO_OR_NULL_ARG(3, column); + + Local cb = Local::Cast(info[4]); + + ODBCConnection* conn = Nan::ObjectWrap::Unwrap(info.Holder()); + + uv_work_t* work_req = (uv_work_t *) (calloc(1, sizeof(uv_work_t))); + + query_work_data* data = (query_work_data *) calloc(1, sizeof(query_work_data)); + + if (!data) { + Nan::LowMemoryNotification(); + Nan::ThrowError("Could not allocate enough memory"); + return; + } + + data->sql = NULL; + data->catalog = NULL; + data->schema = NULL; + data->table = NULL; + data->type = NULL; + data->column = NULL; + data->cb = new Nan::Callback(cb); + + if (!catalog->Equals(Nan::New("null").ToLocalChecked())) { +#ifdef UNICODE + data->catalog = (uint16_t *) malloc((catalog->Length() * sizeof(uint16_t)) + sizeof(uint16_t)); + catalog->Write((uint16_t *) data->catalog); +#else + data->catalog = (char *) malloc(catalog->Utf8Length() + 1); + catalog->WriteUtf8((char *) data->catalog); +#endif + } + + if (!schema->Equals(Nan::New("null").ToLocalChecked())) { +#ifdef UNICODE + data->schema = (uint16_t *) malloc((schema->Length() * sizeof(uint16_t)) + sizeof(uint16_t)); + schema->Write((uint16_t *) data->schema); +#else + data->schema = (char *) malloc(schema->Utf8Length() + 1); + schema->WriteUtf8((char *) data->schema); +#endif + } + + if (!table->Equals(Nan::New("null").ToLocalChecked())) { +#ifdef UNICODE + data->table = (uint16_t *) malloc((table->Length() * sizeof(uint16_t)) + sizeof(uint16_t)); + table->Write((uint16_t *) data->table); +#else + data->table = (char *) 
malloc(table->Utf8Length() + 1); + table->WriteUtf8((char *) data->table); +#endif + } + + if (!column->Equals(Nan::New("null").ToLocalChecked())) { +#ifdef UNICODE + data->column = (uint16_t *) malloc((column->Length() * sizeof(uint16_t)) + sizeof(uint16_t)); + column->Write((uint16_t *) data->column); +#else + data->column = (char *) malloc(column->Utf8Length() + 1); + column->WriteUtf8((char *) data->column); +#endif + } + + data->conn = conn; + work_req->data = data; + + uv_queue_work( + uv_default_loop(), + work_req, + UV_Columns, + (uv_after_work_cb)UV_AfterQuery); + + conn->Ref(); + + info.GetReturnValue().Set(Nan::Undefined()); +} + +void ODBCConnection::UV_Columns(uv_work_t* req) { + query_work_data* data = (query_work_data *)(req->data); + + uv_mutex_lock(&ODBC::g_odbcMutex); + + SQLAllocHandle(SQL_HANDLE_STMT, data->conn->m_hDBC, &data->hSTMT ); + + uv_mutex_unlock(&ODBC::g_odbcMutex); + + SQLRETURN ret = SQLColumns( + data->hSTMT, + (SQLTCHAR *) data->catalog, SQL_NTS, + (SQLTCHAR *) data->schema, SQL_NTS, + (SQLTCHAR *) data->table, SQL_NTS, + (SQLTCHAR *) data->column, SQL_NTS + ); + + // this will be checked later in UV_AfterQuery + data->result = ret; +} + +/* + * BeginTransactionSync + * + */ + +NAN_METHOD(ODBCConnection::BeginTransactionSync) { + DEBUG_PRINTF("ODBCConnection::BeginTransactionSync\n"); + Nan::HandleScope scope; + + ODBCConnection* conn = Nan::ObjectWrap::Unwrap(info.Holder()); + + SQLRETURN ret; + + //set the connection manual commits + ret = SQLSetConnectAttr( + conn->m_hDBC, + SQL_ATTR_AUTOCOMMIT, + (SQLPOINTER) SQL_AUTOCOMMIT_OFF, + SQL_NTS); + + if (!SQL_SUCCEEDED(ret)) { + Local objError = ODBC::GetSQLError(SQL_HANDLE_DBC, conn->m_hDBC); + + Nan::ThrowError(objError); + + info.GetReturnValue().Set(Nan::False()); + } + + info.GetReturnValue().Set(Nan::True()); +} + +/* + * BeginTransaction + * + */ + +NAN_METHOD(ODBCConnection::BeginTransaction) { + DEBUG_PRINTF("ODBCConnection::BeginTransaction\n"); + Nan::HandleScope scope; + + REQ_FUN_ARG(0, cb); + + ODBCConnection* conn = Nan::ObjectWrap::Unwrap(info.Holder()); + + uv_work_t* work_req = (uv_work_t *) (calloc(1, sizeof(uv_work_t))); + + query_work_data* data = + (query_work_data *) calloc(1, sizeof(query_work_data)); + + if (!data) { + Nan::LowMemoryNotification(); + return Nan::ThrowError("Could not allocate enough memory"); + } + + data->cb = new Nan::Callback(cb); + data->conn = conn; + work_req->data = data; + + uv_queue_work( + uv_default_loop(), + work_req, + UV_BeginTransaction, + (uv_after_work_cb)UV_AfterBeginTransaction); + + return; +} + +/* + * UV_BeginTransaction + * + */ + +void ODBCConnection::UV_BeginTransaction(uv_work_t* req) { + DEBUG_PRINTF("ODBCConnection::UV_BeginTransaction\n"); + + query_work_data* data = (query_work_data *)(req->data); + + //set the connection manual commits + data->result = SQLSetConnectAttr( + data->conn->self()->m_hDBC, + SQL_ATTR_AUTOCOMMIT, + (SQLPOINTER) SQL_AUTOCOMMIT_OFF, + SQL_NTS); +} + +/* + * UV_AfterBeginTransaction + * + */ + +void ODBCConnection::UV_AfterBeginTransaction(uv_work_t* req, int status) { + DEBUG_PRINTF("ODBCConnection::UV_AfterBeginTransaction\n"); + Nan::HandleScope scope; + + //TODO: Is this supposed to be of type query_work_data? 
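+  //NOTE: the work item was allocated as a query_work_data in BeginTransaction;
+  //cb and conn line up in both layouts, but 'result' does not, so this cast
+  //likely reads the wrong member when checking for errors below.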
+ open_connection_work_data* data = (open_connection_work_data *)(req->data); + + Local argv[1]; + + bool err = false; + + if (!SQL_SUCCEEDED(data->result)) { + err = true; + + Local objError = ODBC::GetSQLError(SQL_HANDLE_DBC, data->conn->self()->m_hDBC); + + argv[0] = objError; + } + + Nan::TryCatch try_catch; + + data->cb->Call( err ? 1 : 0, argv); + + if (try_catch.HasCaught()) { + Nan::FatalException(try_catch); + } + + delete data->cb; + + free(data); + free(req); +} + +/* + * EndTransactionSync + * + */ + +NAN_METHOD(ODBCConnection::EndTransactionSync) { + DEBUG_PRINTF("ODBCConnection::EndTransactionSync\n"); + Nan::HandleScope scope; + + ODBCConnection* conn = Nan::ObjectWrap::Unwrap(info.Holder()); + + REQ_BOOL_ARG(0, rollback); + + Local objError; + SQLRETURN ret; + bool error = false; + SQLSMALLINT completionType = (rollback->Value()) + ? SQL_ROLLBACK + : SQL_COMMIT + ; + + //Call SQLEndTran + ret = SQLEndTran( + SQL_HANDLE_DBC, + conn->m_hDBC, + completionType); + + //check how the transaction went + if (!SQL_SUCCEEDED(ret)) { + error = true; + + objError = ODBC::GetSQLError(SQL_HANDLE_DBC, conn->m_hDBC); + } + + //Reset the connection back to autocommit + ret = SQLSetConnectAttr( + conn->m_hDBC, + SQL_ATTR_AUTOCOMMIT, + (SQLPOINTER) SQL_AUTOCOMMIT_ON, + SQL_NTS); + + //check how setting the connection attr went + //but only process the code if an error has not already + //occurred. If an error occurred during SQLEndTran, + //that is the error that we want to throw. + if (!SQL_SUCCEEDED(ret) && !error) { + //TODO: if this also failed, we really should + //be restarting the connection or something to deal with this state + error = true; + + objError = ODBC::GetSQLError(SQL_HANDLE_DBC, conn->m_hDBC); + } + + if (error) { + Nan::ThrowError(objError); + + info.GetReturnValue().Set(Nan::False()); + } + else { + info.GetReturnValue().Set(Nan::True()); + } +} + +/* + * EndTransaction + * + */ + +NAN_METHOD(ODBCConnection::EndTransaction) { + DEBUG_PRINTF("ODBCConnection::EndTransaction\n"); + Nan::HandleScope scope; + + REQ_BOOL_ARG(0, rollback); + REQ_FUN_ARG(1, cb); + + ODBCConnection* conn = Nan::ObjectWrap::Unwrap(info.Holder()); + + uv_work_t* work_req = (uv_work_t *) (calloc(1, sizeof(uv_work_t))); + + query_work_data* data = + (query_work_data *) calloc(1, sizeof(query_work_data)); + + if (!data) { + Nan::LowMemoryNotification(); + return Nan::ThrowError("Could not allocate enough memory"); + } + + data->completionType = (rollback->Value()) + ? SQL_ROLLBACK + : SQL_COMMIT + ; + data->cb = new Nan::Callback(cb); + data->conn = conn; + work_req->data = data; + + uv_queue_work( + uv_default_loop(), + work_req, + UV_EndTransaction, + (uv_after_work_cb)UV_AfterEndTransaction); + + info.GetReturnValue().Set(Nan::Undefined()); +} + +/* + * UV_EndTransaction + * + */ + +void ODBCConnection::UV_EndTransaction(uv_work_t* req) { + DEBUG_PRINTF("ODBCConnection::UV_EndTransaction\n"); + + query_work_data* data = (query_work_data *)(req->data); + + bool err = false; + + //Call SQLEndTran + SQLRETURN ret = SQLEndTran( + SQL_HANDLE_DBC, + data->conn->m_hDBC, + data->completionType); + + data->result = ret; + + if (!SQL_SUCCEEDED(ret)) { + err = true; + } + + //Reset the connection back to autocommit + ret = SQLSetConnectAttr( + data->conn->m_hDBC, + SQL_ATTR_AUTOCOMMIT, + (SQLPOINTER) SQL_AUTOCOMMIT_ON, + SQL_NTS); + + if (!SQL_SUCCEEDED(ret) && !err) { + //there was not an earlier error, + //so we shall pass the return code from + //this last call. 
+ data->result = ret; + } +} + +/* + * UV_AfterEndTransaction + * + */ + +void ODBCConnection::UV_AfterEndTransaction(uv_work_t* req, int status) { + DEBUG_PRINTF("ODBCConnection::UV_AfterEndTransaction\n"); + Nan::HandleScope scope; + + open_connection_work_data* data = (open_connection_work_data *)(req->data); + + Local argv[1]; + + bool err = false; + + if (!SQL_SUCCEEDED(data->result)) { + err = true; + + Local objError = ODBC::GetSQLError(SQL_HANDLE_DBC, data->conn->self()->m_hDBC); + + argv[0] = objError; + } + + Nan::TryCatch try_catch; + + data->cb->Call(err ? 1 : 0, argv); + + if (try_catch.HasCaught()) { + Nan::FatalException(try_catch); + } + + delete data->cb; + + free(data); + free(req); +} diff --git a/src/odbc_connection.h b/src/odbc_connection.h new file mode 100644 index 00000000..5da34417 --- /dev/null +++ b/src/odbc_connection.h @@ -0,0 +1,174 @@ +/* + Copyright (c) 2013, Dan VerWeire + Copyright (c) 2010, Lee Smith + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ + +#ifndef _SRC_ODBC_CONNECTION_H +#define _SRC_ODBC_CONNECTION_H + +#include + +class ODBCConnection : public Nan::ObjectWrap { + public: + static Nan::Persistent OPTION_SQL; + static Nan::Persistent OPTION_PARAMS; + static Nan::Persistent OPTION_NORESULTS; + static Nan::Persistent constructor; + + static void Init(v8::Handle exports); + + void Free(); + + protected: + ODBCConnection() {}; + + explicit ODBCConnection(HENV hENV, HDBC hDBC): + Nan::ObjectWrap(), + m_hENV(hENV), + m_hDBC(hDBC) {}; + + ~ODBCConnection(); + +public: + //constructor + static NAN_METHOD(New); + + //Property Getter/Setters + static NAN_GETTER(ConnectedGetter); + static NAN_GETTER(ConnectTimeoutGetter); + static NAN_SETTER(ConnectTimeoutSetter); + static NAN_GETTER(LoginTimeoutGetter); + static NAN_SETTER(LoginTimeoutSetter); + + //async methods + static NAN_METHOD(BeginTransaction); +protected: + static void UV_BeginTransaction(uv_work_t* work_req); + static void UV_AfterBeginTransaction(uv_work_t* work_req, int status); + +public: + static NAN_METHOD(EndTransaction); +protected: + static void UV_EndTransaction(uv_work_t* work_req); + static void UV_AfterEndTransaction(uv_work_t* work_req, int status); + +public: + static NAN_METHOD(Open); +protected: + static void UV_Open(uv_work_t* work_req); + static void UV_AfterOpen(uv_work_t* work_req, int status); + +public: + static NAN_METHOD(Close); +protected: + static void UV_Close(uv_work_t* work_req); + static void UV_AfterClose(uv_work_t* work_req, int status); + +public: + static NAN_METHOD(CreateStatement); +protected: + static void UV_CreateStatement(uv_work_t* work_req); + static void UV_AfterCreateStatement(uv_work_t* work_req, int status); + +public: + static NAN_METHOD(Query); +protected: + static void UV_Query(uv_work_t* req); + static void UV_AfterQuery(uv_work_t* req, int status); + +public: + static 
NAN_METHOD(Columns); +protected: + static void UV_Columns(uv_work_t* req); + +public: + static NAN_METHOD(Tables); +protected: + static void UV_Tables(uv_work_t* req); + + //sync methods +public: + static NAN_METHOD(CloseSync); + static NAN_METHOD(CreateStatementSync); + static NAN_METHOD(OpenSync); + static NAN_METHOD(QuerySync); + static NAN_METHOD(BeginTransactionSync); + static NAN_METHOD(EndTransactionSync); + static NAN_METHOD(GetInfoSync); +protected: + + struct Fetch_Request { + Nan::Callback* callback; + ODBCConnection *objResult; + SQLRETURN result; + }; + + ODBCConnection *self(void) { return this; } + + protected: + HENV m_hENV; + HDBC m_hDBC; + SQLUSMALLINT canHaveMoreResults; + bool connected; + int statements; + SQLUINTEGER connectTimeout; + SQLUINTEGER loginTimeout; +}; + +struct create_statement_work_data { + Nan::Callback* cb; + ODBCConnection *conn; + HSTMT hSTMT; + int result; +}; + +struct query_work_data { + Nan::Callback* cb; + ODBCConnection *conn; + HSTMT hSTMT; + + Parameter *params; + int paramCount; + int completionType; + bool noResultObject; + + void *sql; + void *catalog; + void *schema; + void *table; + void *type; + void *column; + + int sqlLen; + int sqlSize; + + int result; +}; + +struct open_connection_work_data { + Nan::Callback* cb; + ODBCConnection *conn; + int result; + int connectionLength; + void* connection; +}; + +struct close_connection_work_data { + Nan::Callback* cb; + ODBCConnection *conn; + int result; +}; + +#endif diff --git a/src/odbc_result.cpp b/src/odbc_result.cpp new file mode 100644 index 00000000..1c1a7160 --- /dev/null +++ b/src/odbc_result.cpp @@ -0,0 +1,780 @@ +/* + Copyright (c) 2013, Dan VerWeire + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ + +#include +#include +#include +#include +#include +#include + +#include "odbc.h" +#include "odbc_connection.h" +#include "odbc_result.h" +#include "odbc_statement.h" + +using namespace v8; +using namespace node; + +Nan::Persistent ODBCResult::constructor; +Nan::Persistent ODBCResult::OPTION_FETCH_MODE; + +void ODBCResult::Init(v8::Handle exports) { + DEBUG_PRINTF("ODBCResult::Init\n"); + Nan::HandleScope scope; + + Local constructor_template = Nan::New(New); + + // Constructor Template + constructor_template->SetClassName(Nan::New("ODBCResult").ToLocalChecked()); + + // Reserve space for one Handle + Local instance_template = constructor_template->InstanceTemplate(); + instance_template->SetInternalFieldCount(1); + + // Prototype Methods + Nan::SetPrototypeMethod(constructor_template, "fetchAll", FetchAll); + Nan::SetPrototypeMethod(constructor_template, "fetch", Fetch); + + Nan::SetPrototypeMethod(constructor_template, "moreResultsSync", MoreResultsSync); + Nan::SetPrototypeMethod(constructor_template, "closeSync", CloseSync); + Nan::SetPrototypeMethod(constructor_template, "fetchSync", FetchSync); + Nan::SetPrototypeMethod(constructor_template, "fetchAllSync", FetchAllSync); + Nan::SetPrototypeMethod(constructor_template, "getColumnNamesSync", GetColumnNamesSync); + Nan::SetPrototypeMethod(constructor_template, "getRowCountSync", GetRowCountSync); + + // Properties + OPTION_FETCH_MODE.Reset(Nan::New("fetchMode").ToLocalChecked()); + Nan::SetAccessor(instance_template, Nan::New("fetchMode").ToLocalChecked(), FetchModeGetter, FetchModeSetter); + + // Attach the Database Constructor to the target object + constructor.Reset(constructor_template->GetFunction()); + exports->Set(Nan::New("ODBCResult").ToLocalChecked(), + constructor_template->GetFunction()); +} + +ODBCResult::~ODBCResult() { + DEBUG_PRINTF("ODBCResult::~ODBCResult\n"); + //DEBUG_PRINTF("ODBCResult::~ODBCResult m_hSTMT=%x\n", m_hSTMT); + this->Free(); +} + +void ODBCResult::Free() { + DEBUG_PRINTF("ODBCResult::Free\n"); + //DEBUG_PRINTF("ODBCResult::Free m_hSTMT=%X m_canFreeHandle=%X\n", m_hSTMT, m_canFreeHandle); + + if (m_hSTMT && m_canFreeHandle) { + uv_mutex_lock(&ODBC::g_odbcMutex); + + SQLFreeHandle( SQL_HANDLE_STMT, m_hSTMT); + + m_hSTMT = NULL; + + uv_mutex_unlock(&ODBC::g_odbcMutex); + } + + if (bufferLength > 0) { + bufferLength = 0; + free(buffer); + } +} + +NAN_METHOD(ODBCResult::New) { + DEBUG_PRINTF("ODBCResult::New\n"); + Nan::HandleScope scope; + + REQ_EXT_ARG(0, js_henv); + REQ_EXT_ARG(1, js_hdbc); + REQ_EXT_ARG(2, js_hstmt); + REQ_EXT_ARG(3, js_canFreeHandle); + + HENV hENV = static_cast(js_henv->Value()); + HDBC hDBC = static_cast(js_hdbc->Value()); + HSTMT hSTMT = static_cast(js_hstmt->Value()); + bool* canFreeHandle = static_cast(js_canFreeHandle->Value()); + + //create a new OBCResult object + ODBCResult* objODBCResult = new ODBCResult(hENV, hDBC, hSTMT, *canFreeHandle); + + DEBUG_PRINTF("ODBCResult::New\n"); + //DEBUG_PRINTF("ODBCResult::New m_hDBC=%X m_hDBC=%X m_hSTMT=%X canFreeHandle=%X\n", + // objODBCResult->m_hENV, + // objODBCResult->m_hDBC, + // objODBCResult->m_hSTMT, + // objODBCResult->m_canFreeHandle + //); + + //free the pointer to canFreeHandle + delete canFreeHandle; + + //specify the buffer length + objODBCResult->bufferLength = MAX_VALUE_SIZE - 1; + + //initialze a buffer for this object + objODBCResult->buffer = (uint16_t *) malloc(objODBCResult->bufferLength + 1); + //TODO: make sure the malloc succeeded + + //set the initial colCount to 0 + objODBCResult->colCount = 0; + + 
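+  //Note: `buffer` is the single, reusable column-conversion buffer for this
+  //result set (MAX_VALUE_SIZE bytes, shared by every fetch call below); it is
+  //released again in Free() once bufferLength has been set.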
//default fetchMode to FETCH_OBJECT + objODBCResult->m_fetchMode = FETCH_OBJECT; + + objODBCResult->Wrap(info.Holder()); + + info.GetReturnValue().Set(info.Holder()); +} + +NAN_GETTER(ODBCResult::FetchModeGetter) { + Nan::HandleScope scope; + + ODBCResult *obj = Nan::ObjectWrap::Unwrap(info.Holder()); + + info.GetReturnValue().Set(Nan::New(obj->m_fetchMode)); +} + +NAN_SETTER(ODBCResult::FetchModeSetter) { + Nan::HandleScope scope; + + ODBCResult *obj = Nan::ObjectWrap::Unwrap(info.Holder()); + + if (value->IsNumber()) { + obj->m_fetchMode = value->Int32Value(); + } +} + +/* + * Fetch + */ + +NAN_METHOD(ODBCResult::Fetch) { + DEBUG_PRINTF("ODBCResult::Fetch\n"); + Nan::HandleScope scope; + + ODBCResult* objODBCResult = Nan::ObjectWrap::Unwrap(info.Holder()); + + uv_work_t* work_req = (uv_work_t *) (calloc(1, sizeof(uv_work_t))); + + fetch_work_data* data = (fetch_work_data *) calloc(1, sizeof(fetch_work_data)); + + Local cb; + + //set the fetch mode to the default of this instance + data->fetchMode = objODBCResult->m_fetchMode; + + if (info.Length() == 1 && info[0]->IsFunction()) { + cb = Local::Cast(info[0]); + } + else if (info.Length() == 2 && info[0]->IsObject() && info[1]->IsFunction()) { + cb = Local::Cast(info[1]); + + Local obj = info[0]->ToObject(); + + Local fetchModeKey = Nan::New(OPTION_FETCH_MODE); + if (obj->Has(fetchModeKey) && obj->Get(fetchModeKey)->IsInt32()) { + data->fetchMode = Nan::To(obj->Get(fetchModeKey)).ToLocalChecked()->Value(); + } + } + else { + return Nan::ThrowTypeError("ODBCResult::Fetch(): 1 or 2 arguments are required. The last argument must be a callback function."); + } + + data->cb = new Nan::Callback(cb); + + data->objResult = objODBCResult; + work_req->data = data; + + uv_queue_work( + uv_default_loop(), + work_req, + UV_Fetch, + (uv_after_work_cb)UV_AfterFetch); + + objODBCResult->Ref(); + + info.GetReturnValue().Set(Nan::Undefined()); +} + +void ODBCResult::UV_Fetch(uv_work_t* work_req) { + DEBUG_PRINTF("ODBCResult::UV_Fetch\n"); + + fetch_work_data* data = (fetch_work_data *)(work_req->data); + + data->result = SQLFetch(data->objResult->m_hSTMT); +} + +void ODBCResult::UV_AfterFetch(uv_work_t* work_req, int status) { + DEBUG_PRINTF("ODBCResult::UV_AfterFetch\n"); + Nan::HandleScope scope; + + fetch_work_data* data = (fetch_work_data *)(work_req->data); + + SQLRETURN ret = data->result; + //TODO: we should probably define this on the work data so we + //don't have to keep creating it? 
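+  //UV_Fetch has already run SQLFetch on the worker thread; this handler lazily
+  //loads the column metadata on the first row and then either reports an
+  //error, signals end-of-data with a null row, or hands one converted row
+  //(array or object form, depending on data->fetchMode) to the callback.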
+ Local objError; + bool moreWork = true; + bool error = false; + + if (data->objResult->colCount == 0) { + data->objResult->columns = ODBC::GetColumns( + data->objResult->m_hSTMT, + &data->objResult->colCount); + } + + //check to see if the result has no columns + if (data->objResult->colCount == 0) { + //this means + moreWork = false; + } + //check to see if there was an error + else if (ret == SQL_ERROR) { + moreWork = false; + error = true; + + objError = ODBC::GetSQLError( + SQL_HANDLE_STMT, + data->objResult->m_hSTMT, + (char *) "Error in ODBCResult::UV_AfterFetch"); + } + //check to see if we are at the end of the recordset + else if (ret == SQL_NO_DATA) { + moreWork = false; + } + + if (moreWork) { + Local info[2]; + + info[0] = Nan::Null(); + if (data->fetchMode == FETCH_ARRAY) { + info[1] = ODBC::GetRecordArray( + data->objResult->m_hSTMT, + data->objResult->columns, + &data->objResult->colCount, + data->objResult->buffer, + data->objResult->bufferLength); + } + else { + info[1] = ODBC::GetRecordTuple( + data->objResult->m_hSTMT, + data->objResult->columns, + &data->objResult->colCount, + data->objResult->buffer, + data->objResult->bufferLength); + } + + Nan::TryCatch try_catch; + + data->cb->Call(2, info); + delete data->cb; + + if (try_catch.HasCaught()) { + Nan::FatalException(try_catch); + } + } + else { + ODBC::FreeColumns(data->objResult->columns, &data->objResult->colCount); + + Local info[2]; + + //if there was an error, pass that as arg[0] otherwise Null + if (error) { + info[0] = objError; + } + else { + info[0] = Nan::Null(); + } + + info[1] = Nan::Null(); + + Nan::TryCatch try_catch; + + data->cb->Call(2, info); + delete data->cb; + + if (try_catch.HasCaught()) { + Nan::FatalException(try_catch); + } + } + + data->objResult->Unref(); + + free(data); + free(work_req); + + return; +} + +/* + * FetchSync + */ + +NAN_METHOD(ODBCResult::FetchSync) { + DEBUG_PRINTF("ODBCResult::FetchSync\n"); + Nan::HandleScope scope; + + ODBCResult* objResult = Nan::ObjectWrap::Unwrap(info.Holder()); + + Local objError; + bool moreWork = true; + bool error = false; + int fetchMode = objResult->m_fetchMode; + + if (info.Length() == 1 && info[0]->IsObject()) { + Local obj = info[0]->ToObject(); + + Local fetchModeKey = Nan::New(OPTION_FETCH_MODE); + if (obj->Has(fetchModeKey) && obj->Get(fetchModeKey)->IsInt32()) { + fetchMode = Nan::To(obj->Get(fetchModeKey)).ToLocalChecked()->Value(); + } + } + + SQLRETURN ret = SQLFetch(objResult->m_hSTMT); + + if (objResult->colCount == 0) { + objResult->columns = ODBC::GetColumns( + objResult->m_hSTMT, + &objResult->colCount); + } + + //check to see if the result has no columns + if (objResult->colCount == 0) { + moreWork = false; + } + //check to see if there was an error + else if (ret == SQL_ERROR) { + moreWork = false; + error = true; + + objError = ODBC::GetSQLError( + SQL_HANDLE_STMT, + objResult->m_hSTMT, + (char *) "Error in ODBCResult::UV_AfterFetch"); + } + //check to see if we are at the end of the recordset + else if (ret == SQL_NO_DATA) { + moreWork = false; + } + + if (moreWork) { + Local data; + + if (fetchMode == FETCH_ARRAY) { + data = ODBC::GetRecordArray( + objResult->m_hSTMT, + objResult->columns, + &objResult->colCount, + objResult->buffer, + objResult->bufferLength); + } + else { + data = ODBC::GetRecordTuple( + objResult->m_hSTMT, + objResult->columns, + &objResult->colCount, + objResult->buffer, + objResult->bufferLength); + } + + info.GetReturnValue().Set(data); + } + else { + ODBC::FreeColumns(objResult->columns, 
&objResult->colCount); + + //if there was an error, pass that as arg[0] otherwise Null + if (error) { + Nan::ThrowError(objError); + + info.GetReturnValue().Set(Nan::Null()); + } + else { + info.GetReturnValue().Set(Nan::Null()); + } + } +} + +/* + * FetchAll + */ + +NAN_METHOD(ODBCResult::FetchAll) { + DEBUG_PRINTF("ODBCResult::FetchAll\n"); + Nan::HandleScope scope; + + ODBCResult* objODBCResult = Nan::ObjectWrap::Unwrap(info.Holder()); + + uv_work_t* work_req = (uv_work_t *) (calloc(1, sizeof(uv_work_t))); + + fetch_work_data* data = (fetch_work_data *) calloc(1, sizeof(fetch_work_data)); + + Local cb; + + data->fetchMode = objODBCResult->m_fetchMode; + + if (info.Length() == 1 && info[0]->IsFunction()) { + cb = Local::Cast(info[0]); + } + else if (info.Length() == 2 && info[0]->IsObject() && info[1]->IsFunction()) { + cb = Local::Cast(info[1]); + + Local obj = info[0]->ToObject(); + + Local fetchModeKey = Nan::New(OPTION_FETCH_MODE); + if (obj->Has(fetchModeKey) && obj->Get(fetchModeKey)->IsInt32()) { + data->fetchMode = Nan::To(obj->Get(fetchModeKey)).ToLocalChecked()->Value(); + } + } + else { + Nan::ThrowTypeError("ODBCResult::FetchAll(): 1 or 2 arguments are required. The last argument must be a callback function."); + } + + data->rows.Reset(Nan::New()); + data->errorCount = 0; + data->count = 0; + data->objError.Reset(Nan::New()); + + data->cb = new Nan::Callback(cb); + data->objResult = objODBCResult; + + work_req->data = data; + + uv_queue_work(uv_default_loop(), + work_req, + UV_FetchAll, + (uv_after_work_cb)UV_AfterFetchAll); + + data->objResult->Ref(); + + info.GetReturnValue().Set(Nan::Undefined()); +} + +void ODBCResult::UV_FetchAll(uv_work_t* work_req) { + DEBUG_PRINTF("ODBCResult::UV_FetchAll\n"); + + fetch_work_data* data = (fetch_work_data *)(work_req->data); + + data->result = SQLFetch(data->objResult->m_hSTMT); + } + +void ODBCResult::UV_AfterFetchAll(uv_work_t* work_req, int status) { + DEBUG_PRINTF("ODBCResult::UV_AfterFetchAll\n"); + Nan::HandleScope scope; + + fetch_work_data* data = (fetch_work_data *)(work_req->data); + + ODBCResult* self = data->objResult->self(); + + bool doMoreWork = true; + + if (self->colCount == 0) { + self->columns = ODBC::GetColumns(self->m_hSTMT, &self->colCount); + } + + //check to see if the result set has columns + if (self->colCount == 0) { + //this most likely means that the query was something like + //'insert into ....' + doMoreWork = false; + } + //check to see if there was an error + else if (data->result == SQL_ERROR) { + data->errorCount++; + + //NanAssignPersistent(data->objError, ODBC::GetSQLError( + data->objError.Reset(ODBC::GetSQLError( + SQL_HANDLE_STMT, + self->m_hSTMT, + (char *) "[node-odbc] Error in ODBCResult::UV_AfterFetchAll" + )); + + doMoreWork = false; + } + //check to see if we are at the end of the recordset + else if (data->result == SQL_NO_DATA) { + doMoreWork = false; + } + else { + Local rows = Nan::New(data->rows); + if (data->fetchMode == FETCH_ARRAY) { + rows->Set( + Nan::New(data->count), + ODBC::GetRecordArray( + self->m_hSTMT, + self->columns, + &self->colCount, + self->buffer, + self->bufferLength) + ); + } + else { + rows->Set( + Nan::New(data->count), + ODBC::GetRecordTuple( + self->m_hSTMT, + self->columns, + &self->colCount, + self->buffer, + self->bufferLength) + ); + } + data->count++; + } + + if (doMoreWork) { + //Go back to the thread pool and fetch more data! 
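+    //Each pass retrieves exactly one row: UV_FetchAll runs SQLFetch on the
+    //worker thread and this handler re-queues itself until SQL_NO_DATA or an
+    //error, accumulating rows in the persistent `rows` array. From JavaScript
+    //the whole loop is driven by a single call, for example
+    //  result.fetchAll(function (err, rows) { ... });
+    //or, assuming the wrapper exposes the fetch-mode constants from odbc.h,
+    //  result.fetchAll({ fetchMode: FETCH_ARRAY }, function (err, rows) { ... });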
+ uv_queue_work( + uv_default_loop(), + work_req, + UV_FetchAll, + (uv_after_work_cb)UV_AfterFetchAll); + } + else { + ODBC::FreeColumns(self->columns, &self->colCount); + + Local info[2]; + + if (data->errorCount > 0) { + info[0] = Nan::New(data->objError); + } + else { + info[0] = Nan::Null(); + } + + info[1] = Nan::New(data->rows); + + Nan::TryCatch try_catch; + + data->cb->Call(2, info); + delete data->cb; + data->rows.Reset(); + data->objError.Reset(); + + if (try_catch.HasCaught()) { + Nan::FatalException(try_catch); + } + + free(data); + free(work_req); + + self->Unref(); + } +} + +/* + * FetchAllSync + */ + +NAN_METHOD(ODBCResult::FetchAllSync) { + DEBUG_PRINTF("ODBCResult::FetchAllSync\n"); + Nan::HandleScope scope; + + ODBCResult* self = Nan::ObjectWrap::Unwrap(info.Holder()); + + Local objError = Nan::New(); + + SQLRETURN ret; + int count = 0; + int errorCount = 0; + int fetchMode = self->m_fetchMode; + + if (info.Length() == 1 && info[0]->IsObject()) { + Local obj = info[0]->ToObject(); + + Local fetchModeKey = Nan::New(OPTION_FETCH_MODE); + if (obj->Has(fetchModeKey) && obj->Get(fetchModeKey)->IsInt32()) { + fetchMode = Nan::To(obj->Get(fetchModeKey)).ToLocalChecked()->Value(); + } + } + + if (self->colCount == 0) { + self->columns = ODBC::GetColumns(self->m_hSTMT, &self->colCount); + } + + Local rows = Nan::New(); + + //Only loop through the recordset if there are columns + if (self->colCount > 0) { + //loop through all records + while (true) { + ret = SQLFetch(self->m_hSTMT); + + //check to see if there was an error + if (ret == SQL_ERROR) { + errorCount++; + + objError = ODBC::GetSQLError( + SQL_HANDLE_STMT, + self->m_hSTMT, + (char *) "[node-odbc] Error in ODBCResult::UV_AfterFetchAll; probably" + " your query did not have a result set." 
+ ); + + break; + } + + //check to see if we are at the end of the recordset + if (ret == SQL_NO_DATA) { + ODBC::FreeColumns(self->columns, &self->colCount); + + break; + } + + if (fetchMode == FETCH_ARRAY) { + rows->Set( + Nan::New(count), + ODBC::GetRecordArray( + self->m_hSTMT, + self->columns, + &self->colCount, + self->buffer, + self->bufferLength) + ); + } + else { + rows->Set( + Nan::New(count), + ODBC::GetRecordTuple( + self->m_hSTMT, + self->columns, + &self->colCount, + self->buffer, + self->bufferLength) + ); + } + count++; + } + } + else { + ODBC::FreeColumns(self->columns, &self->colCount); + } + + //throw the error object if there were errors + if (errorCount > 0) { + Nan::ThrowError(objError); + } + + info.GetReturnValue().Set(rows); +} + +/* + * CloseSync + * + */ + +NAN_METHOD(ODBCResult::CloseSync) { + DEBUG_PRINTF("ODBCResult::CloseSync\n"); + Nan::HandleScope scope; + + OPT_INT_ARG(0, closeOption, SQL_DESTROY); + + ODBCResult* result = Nan::ObjectWrap::Unwrap(info.Holder()); + + DEBUG_PRINTF("ODBCResult::CloseSync closeOption=%i m_canFreeHandle=%i\n", + closeOption, result->m_canFreeHandle); + + if (closeOption == SQL_DESTROY && result->m_canFreeHandle) { + result->Free(); + } + else if (closeOption == SQL_DESTROY && !result->m_canFreeHandle) { + //We technically can't free the handle so, we'll SQL_CLOSE + uv_mutex_lock(&ODBC::g_odbcMutex); + + SQLFreeStmt(result->m_hSTMT, SQL_CLOSE); + + uv_mutex_unlock(&ODBC::g_odbcMutex); + } + else { + uv_mutex_lock(&ODBC::g_odbcMutex); + + SQLFreeStmt(result->m_hSTMT, closeOption); + + uv_mutex_unlock(&ODBC::g_odbcMutex); + } + + info.GetReturnValue().Set(Nan::True()); +} + +NAN_METHOD(ODBCResult::MoreResultsSync) { + DEBUG_PRINTF("ODBCResult::MoreResultsSync\n"); + Nan::HandleScope scope; + + ODBCResult* result = Nan::ObjectWrap::Unwrap(info.Holder()); + + SQLRETURN ret = SQLMoreResults(result->m_hSTMT); + + if (ret == SQL_ERROR) { + Local objError = ODBC::GetSQLError( + SQL_HANDLE_STMT, + result->m_hSTMT, + (char *)"[node-odbc] Error in ODBCResult::MoreResultsSync" + ); + + Nan::ThrowError(objError); + } + + info.GetReturnValue().Set(SQL_SUCCEEDED(ret) || ret == SQL_ERROR ? 
Nan::True() : Nan::False()); +} + +/* + * GetColumnNamesSync + */ + +NAN_METHOD(ODBCResult::GetColumnNamesSync) { + DEBUG_PRINTF("ODBCResult::GetColumnNamesSync\n"); + Nan::HandleScope scope; + + ODBCResult* self = Nan::ObjectWrap::Unwrap(info.Holder()); + + Local cols = Nan::New(); + + if (self->colCount == 0) { + self->columns = ODBC::GetColumns(self->m_hSTMT, &self->colCount); + } + + for (int i = 0; i < self->colCount; i++) { +#ifdef UNICODE + cols->Set(Nan::New(i), + Nan::New((uint16_t*) self->columns[i].name).ToLocalChecked()); +#else + cols->Set(Nan::New(i), + Nan::New((char *) self->columns[i].name).ToLocalChecked()); +#endif + + } + + info.GetReturnValue().Set(cols); +} + +/* + * GetRowCountSync + */ + +NAN_METHOD(ODBCResult::GetRowCountSync) { + DEBUG_PRINTF("ODBCResult::GetRowCountSync\n"); + Nan::HandleScope scope; + + ODBCResult* self = Nan::ObjectWrap::Unwrap(info.Holder()); + + SQLLEN rowCount = 0; + + SQLRETURN ret = SQLRowCount(self->m_hSTMT, &rowCount); + + if (!SQL_SUCCEEDED(ret)) { + rowCount = 0; + } + + info.GetReturnValue().Set(Nan::New(rowCount)); +} diff --git a/src/odbc_result.h b/src/odbc_result.h new file mode 100644 index 00000000..f100614d --- /dev/null +++ b/src/odbc_result.h @@ -0,0 +1,101 @@ +/* + Copyright (c) 2013, Dan VerWeire + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ + +#ifndef _SRC_ODBC_RESULT_H +#define _SRC_ODBC_RESULT_H + +#include + +class ODBCResult : public Nan::ObjectWrap { + public: + static Nan::Persistent OPTION_FETCH_MODE; + static Nan::Persistent constructor; + static void Init(v8::Handle exports); + + void Free(); + + protected: + ODBCResult() {}; + + explicit ODBCResult(HENV hENV, HDBC hDBC, HSTMT hSTMT, bool canFreeHandle): + Nan::ObjectWrap(), + m_hENV(hENV), + m_hDBC(hDBC), + m_hSTMT(hSTMT), + m_canFreeHandle(canFreeHandle) {}; + + ~ODBCResult(); + + //constructor +public: + static NAN_METHOD(New); + + //async methods + static NAN_METHOD(Fetch); +protected: + static void UV_Fetch(uv_work_t* work_req); + static void UV_AfterFetch(uv_work_t* work_req, int status); + +public: + static NAN_METHOD(FetchAll); +protected: + static void UV_FetchAll(uv_work_t* work_req); + static void UV_AfterFetchAll(uv_work_t* work_req, int status); + + //sync methods +public: + static NAN_METHOD(CloseSync); + static NAN_METHOD(MoreResultsSync); + static NAN_METHOD(FetchSync); + static NAN_METHOD(FetchAllSync); + static NAN_METHOD(GetColumnNamesSync); + static NAN_METHOD(GetRowCountSync); + + //property getter/setters + static NAN_GETTER(FetchModeGetter); + static NAN_SETTER(FetchModeSetter); + +protected: + struct fetch_work_data { + Nan::Callback* cb; + ODBCResult *objResult; + SQLRETURN result; + + int fetchMode; + int count; + int errorCount; + Nan::Persistent rows; + Nan::Persistent objError; + }; + + ODBCResult *self(void) { return this; } + + protected: + HENV m_hENV; + HDBC m_hDBC; + HSTMT m_hSTMT; + bool m_canFreeHandle; + int m_fetchMode; + + uint16_t *buffer; + int bufferLength; + Column *columns; + short colCount; +}; + + + +#endif diff --git a/src/odbc_statement.cpp b/src/odbc_statement.cpp new file mode 100644 index 00000000..9f92ecc8 --- /dev/null +++ b/src/odbc_statement.cpp @@ -0,0 +1,1022 @@ +/* + Copyright (c) 2013, Dan VerWeire + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ + +#include +#include +#include +#include +#include +#include + +#include "odbc.h" +#include "odbc_connection.h" +#include "odbc_result.h" +#include "odbc_statement.h" + +using namespace v8; +using namespace node; + +Nan::Persistent ODBCStatement::constructor; + +void ODBCStatement::Init(v8::Handle exports) { + DEBUG_PRINTF("ODBCStatement::Init\n"); + Nan::HandleScope scope; + + Local t = Nan::New(New); + + // Constructor Template + + t->SetClassName(Nan::New("ODBCStatement").ToLocalChecked()); + + // Reserve space for one Handle + Local instance_template = t->InstanceTemplate(); + instance_template->SetInternalFieldCount(1); + + // Prototype Methods + Nan::SetPrototypeMethod(t, "execute", Execute); + Nan::SetPrototypeMethod(t, "executeSync", ExecuteSync); + + Nan::SetPrototypeMethod(t, "executeDirect", ExecuteDirect); + Nan::SetPrototypeMethod(t, "executeDirectSync", ExecuteDirectSync); + + Nan::SetPrototypeMethod(t, "executeNonQuery", ExecuteNonQuery); + Nan::SetPrototypeMethod(t, "executeNonQuerySync", ExecuteNonQuerySync); + + Nan::SetPrototypeMethod(t, "prepare", Prepare); + Nan::SetPrototypeMethod(t, "prepareSync", PrepareSync); + + Nan::SetPrototypeMethod(t, "bind", Bind); + Nan::SetPrototypeMethod(t, "bindSync", BindSync); + + Nan::SetPrototypeMethod(t, "closeSync", CloseSync); + + // Attach the Database Constructor to the target object + constructor.Reset(t->GetFunction()); + exports->Set(Nan::New("ODBCStatement").ToLocalChecked(), t->GetFunction()); +} + +ODBCStatement::~ODBCStatement() { + this->Free(); +} + +void ODBCStatement::Free() { + DEBUG_PRINTF("ODBCStatement::Free\n"); + //if we previously had parameters, then be sure to free them + if (paramCount) { + int count = paramCount; + paramCount = 0; + + Parameter prm; + + //free parameter memory + for (int i = 0; i < count; i++) { + if (prm = params[i], prm.ParameterValuePtr != NULL) { + switch (prm.ValueType) { + case SQL_C_WCHAR: free(prm.ParameterValuePtr); break; + case SQL_C_CHAR: free(prm.ParameterValuePtr); break; + case SQL_C_SBIGINT: delete (int64_t *)prm.ParameterValuePtr; break; + case SQL_C_DOUBLE: delete (double *)prm.ParameterValuePtr; break; + case SQL_C_BIT: delete (bool *)prm.ParameterValuePtr; break; + } + } + } + + free(params); + } + + if (m_hSTMT) { + uv_mutex_lock(&ODBC::g_odbcMutex); + + SQLFreeHandle(SQL_HANDLE_STMT, m_hSTMT); + m_hSTMT = NULL; + + uv_mutex_unlock(&ODBC::g_odbcMutex); + + if (bufferLength > 0) { + free(buffer); + } + } +} + +NAN_METHOD(ODBCStatement::New) { + DEBUG_PRINTF("ODBCStatement::New\n"); + Nan::HandleScope scope; + + REQ_EXT_ARG(0, js_henv); + REQ_EXT_ARG(1, js_hdbc); + REQ_EXT_ARG(2, js_hstmt); + + HENV hENV = static_cast(js_henv->Value()); + HDBC hDBC = static_cast(js_hdbc->Value()); + HSTMT hSTMT = static_cast(js_hstmt->Value()); + + //create a new OBCResult object + ODBCStatement* stmt = new ODBCStatement(hENV, hDBC, hSTMT); + + //specify the buffer length + stmt->bufferLength = MAX_VALUE_SIZE - 1; + + //initialze a buffer for this object + stmt->buffer = (uint16_t *) malloc(stmt->bufferLength + 1); + //TODO: make sure the malloc succeeded + + //set the initial colCount to 0 + stmt->colCount = 0; + + //initialize the paramCount + stmt->paramCount = 0; + + stmt->Wrap(info.Holder()); + + info.GetReturnValue().Set(info.Holder()); +} + +/* + * Execute + */ + +NAN_METHOD(ODBCStatement::Execute) { + DEBUG_PRINTF("ODBCStatement::Execute\n"); + + Nan::HandleScope scope; + + REQ_FUN_ARG(0, cb); + + ODBCStatement* stmt = Nan::ObjectWrap::Unwrap(info.Holder()); + + uv_work_t* 
work_req = (uv_work_t *) (calloc(1, sizeof(uv_work_t))); + + execute_work_data* data = + (execute_work_data *) calloc(1, sizeof(execute_work_data)); + + data->cb = new Nan::Callback(cb); + + data->stmt = stmt; + work_req->data = data; + + uv_queue_work( + uv_default_loop(), + work_req, + UV_Execute, + (uv_after_work_cb)UV_AfterExecute); + + stmt->Ref(); + + info.GetReturnValue().Set(Nan::Undefined()); +} + +void ODBCStatement::UV_Execute(uv_work_t* req) { + DEBUG_PRINTF("ODBCStatement::UV_Execute\n"); + + execute_work_data* data = (execute_work_data *)(req->data); + + SQLRETURN ret; + + ret = SQLExecute(data->stmt->m_hSTMT); + + data->result = ret; +} + +void ODBCStatement::UV_AfterExecute(uv_work_t* req, int status) { + DEBUG_PRINTF("ODBCStatement::UV_AfterExecute\n"); + + execute_work_data* data = (execute_work_data *)(req->data); + + Nan::HandleScope scope; + + //an easy reference to the statment object + ODBCStatement* self = data->stmt->self(); + + //First thing, let's check if the execution of the query returned any errors + if(data->result == SQL_ERROR) { + ODBC::CallbackSQLError( + SQL_HANDLE_STMT, + self->m_hSTMT, + data->cb); + } + else { + Local info[4]; + bool* canFreeHandle = new bool(false); + + info[0] = Nan::New(self->m_hENV); + info[1] = Nan::New(self->m_hDBC); + info[2] = Nan::New(self->m_hSTMT); + info[3] = Nan::New(canFreeHandle); + + Local js_result = Nan::NewInstance(Nan::New(ODBCResult::constructor), 4, info).ToLocalChecked(); + + info[0] = Nan::Null(); + info[1] = js_result; + + Nan::TryCatch try_catch; + + data->cb->Call(2, info); + + if (try_catch.HasCaught()) { + Nan::FatalException(try_catch); + } + } + + self->Unref(); + delete data->cb; + + free(data); + free(req); +} + +/* + * ExecuteSync + * + */ + +NAN_METHOD(ODBCStatement::ExecuteSync) { + DEBUG_PRINTF("ODBCStatement::ExecuteSync\n"); + + Nan::HandleScope scope; + + ODBCStatement* stmt = Nan::ObjectWrap::Unwrap(info.Holder()); + + SQLRETURN ret = SQLExecute(stmt->m_hSTMT); + + if(ret == SQL_ERROR) { + Local objError = ODBC::GetSQLError( + SQL_HANDLE_STMT, + stmt->m_hSTMT, + (char *) "[node-odbc] Error in ODBCStatement::ExecuteSync" + ); + + Nan::ThrowError(objError); + + info.GetReturnValue().Set(Nan::Null()); + } + else { + Local result[4]; + bool* canFreeHandle = new bool(false); + + result[0] = Nan::New(stmt->m_hENV); + result[1] = Nan::New(stmt->m_hDBC); + result[2] = Nan::New(stmt->m_hSTMT); + result[3] = Nan::New(canFreeHandle); + + Local js_result = Nan::NewInstance(Nan::New(ODBCResult::constructor), 4, result).ToLocalChecked(); + + info.GetReturnValue().Set(js_result); + } +} + +/* + * ExecuteNonQuery + */ + +NAN_METHOD(ODBCStatement::ExecuteNonQuery) { + DEBUG_PRINTF("ODBCStatement::ExecuteNonQuery\n"); + + Nan::HandleScope scope; + + REQ_FUN_ARG(0, cb); + + ODBCStatement* stmt = Nan::ObjectWrap::Unwrap(info.Holder()); + + uv_work_t* work_req = (uv_work_t *) (calloc(1, sizeof(uv_work_t))); + + execute_work_data* data = + (execute_work_data *) calloc(1, sizeof(execute_work_data)); + + data->cb = new Nan::Callback(cb); + + data->stmt = stmt; + work_req->data = data; + + uv_queue_work( + uv_default_loop(), + work_req, + UV_ExecuteNonQuery, + (uv_after_work_cb)UV_AfterExecuteNonQuery); + + stmt->Ref(); + + info.GetReturnValue().Set(Nan::Undefined()); +} + +void ODBCStatement::UV_ExecuteNonQuery(uv_work_t* req) { + DEBUG_PRINTF("ODBCStatement::ExecuteNonQuery\n"); + + execute_work_data* data = (execute_work_data *)(req->data); + + SQLRETURN ret; + + ret = SQLExecute(data->stmt->m_hSTMT); + + 
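+  //The return code is handed to UV_AfterExecuteNonQuery, which, unlike
+  //UV_AfterExecute, does not build an ODBCResult: it reports the affected row
+  //count from SQLRowCount and closes the cursor with SQL_CLOSE. A typical
+  //JavaScript call would be
+  //  stmt.executeNonQuery(function (err, affectedRows) { ... });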
data->result = ret; +} + +void ODBCStatement::UV_AfterExecuteNonQuery(uv_work_t* req, int status) { + DEBUG_PRINTF("ODBCStatement::ExecuteNonQuery\n"); + + execute_work_data* data = (execute_work_data *)(req->data); + + Nan::HandleScope scope; + + //an easy reference to the statment object + ODBCStatement* self = data->stmt->self(); + + //First thing, let's check if the execution of the query returned any errors + if(data->result == SQL_ERROR) { + ODBC::CallbackSQLError( + SQL_HANDLE_STMT, + self->m_hSTMT, + data->cb); + } + else { + SQLLEN rowCount = 0; + + SQLRETURN ret = SQLRowCount(self->m_hSTMT, &rowCount); + + if (!SQL_SUCCEEDED(ret)) { + rowCount = 0; + } + + uv_mutex_lock(&ODBC::g_odbcMutex); + SQLFreeStmt(self->m_hSTMT, SQL_CLOSE); + uv_mutex_unlock(&ODBC::g_odbcMutex); + + Local info[2]; + + info[0] = Nan::Null(); + // We get a potential loss of precision here. Number isn't as big as int64. Probably fine though. + info[1] = Nan::New(rowCount); + + Nan::TryCatch try_catch; + + data->cb->Call(2, info); + + if (try_catch.HasCaught()) { + Nan::FatalException(try_catch); + } + } + + self->Unref(); + delete data->cb; + + free(data); + free(req); +} + +/* + * ExecuteNonQuerySync + * + */ + +NAN_METHOD(ODBCStatement::ExecuteNonQuerySync) { + DEBUG_PRINTF("ODBCStatement::ExecuteNonQuerySync\n"); + + Nan::HandleScope scope; + + ODBCStatement* stmt = Nan::ObjectWrap::Unwrap(info.Holder()); + + SQLRETURN ret = SQLExecute(stmt->m_hSTMT); + + if(ret == SQL_ERROR) { + Local objError = ODBC::GetSQLError( + SQL_HANDLE_STMT, + stmt->m_hSTMT, + (char *) "[node-odbc] Error in ODBCStatement::ExecuteSync" + ); + + Nan::ThrowError(objError); + + info.GetReturnValue().Set(Nan::Null()); + } + else { + SQLLEN rowCount = 0; + + SQLRETURN ret = SQLRowCount(stmt->m_hSTMT, &rowCount); + + if (!SQL_SUCCEEDED(ret)) { + rowCount = 0; + } + + uv_mutex_lock(&ODBC::g_odbcMutex); + SQLFreeStmt(stmt->m_hSTMT, SQL_CLOSE); + uv_mutex_unlock(&ODBC::g_odbcMutex); + + info.GetReturnValue().Set(Nan::New(rowCount)); + } +} + +/* + * ExecuteDirect + * + */ + +NAN_METHOD(ODBCStatement::ExecuteDirect) { + DEBUG_PRINTF("ODBCStatement::ExecuteDirect\n"); + + Nan::HandleScope scope; + + REQ_STRO_ARG(0, sql); + REQ_FUN_ARG(1, cb); + + ODBCStatement* stmt = Nan::ObjectWrap::Unwrap(info.Holder()); + + uv_work_t* work_req = (uv_work_t *) (calloc(1, sizeof(uv_work_t))); + + execute_direct_work_data* data = + (execute_direct_work_data *) calloc(1, sizeof(execute_direct_work_data)); + + data->cb = new Nan::Callback(cb); + +#ifdef UNICODE + data->sqlLen = sql->Length(); + data->sql = (uint16_t *) malloc((data->sqlLen * sizeof(uint16_t)) + sizeof(uint16_t)); + sql->Write((uint16_t *) data->sql); +#else + data->sqlLen = sql->Utf8Length(); + data->sql = (char *) malloc(data->sqlLen +1); + sql->WriteUtf8((char *) data->sql); +#endif + + data->stmt = stmt; + work_req->data = data; + + uv_queue_work( + uv_default_loop(), + work_req, + UV_ExecuteDirect, + (uv_after_work_cb)UV_AfterExecuteDirect); + + stmt->Ref(); + + info.GetReturnValue().Set(Nan::Undefined()); +} + +void ODBCStatement::UV_ExecuteDirect(uv_work_t* req) { + DEBUG_PRINTF("ODBCStatement::UV_ExecuteDirect\n"); + + execute_direct_work_data* data = (execute_direct_work_data *)(req->data); + + SQLRETURN ret; + + ret = SQLExecDirect( + data->stmt->m_hSTMT, + (SQLTCHAR *) data->sql, + data->sqlLen); + + data->result = ret; +} + +void ODBCStatement::UV_AfterExecuteDirect(uv_work_t* req, int status) { + DEBUG_PRINTF("ODBCStatement::UV_AfterExecuteDirect\n"); + + execute_direct_work_data* 
data = (execute_direct_work_data *)(req->data); + + Nan::HandleScope scope; + + //an easy reference to the statment object + ODBCStatement* self = data->stmt->self(); + + //First thing, let's check if the execution of the query returned any errors + if(data->result == SQL_ERROR) { + ODBC::CallbackSQLError( + SQL_HANDLE_STMT, + self->m_hSTMT, + data->cb); + } + else { + Local info[4]; + bool* canFreeHandle = new bool(false); + + info[0] = Nan::New(self->m_hENV); + info[1] = Nan::New(self->m_hDBC); + info[2] = Nan::New(self->m_hSTMT); + info[3] = Nan::New(canFreeHandle); + + Local js_result = Nan::NewInstance(Nan::New(ODBCResult::constructor), 4, info).ToLocalChecked(); + + info[0] = Nan::Null(); + info[1] = js_result; + + Nan::TryCatch try_catch; + + data->cb->Call(2, info); + + if (try_catch.HasCaught()) { + Nan::FatalException(try_catch); + } + } + + self->Unref(); + delete data->cb; + + free(data->sql); + free(data); + free(req); +} + +/* + * ExecuteDirectSync + * + */ + +NAN_METHOD(ODBCStatement::ExecuteDirectSync) { + DEBUG_PRINTF("ODBCStatement::ExecuteDirectSync\n"); + + Nan::HandleScope scope; + +#ifdef UNICODE + REQ_WSTR_ARG(0, sql); +#else + REQ_STR_ARG(0, sql); +#endif + + ODBCStatement* stmt = Nan::ObjectWrap::Unwrap(info.Holder()); + + SQLRETURN ret = SQLExecDirect( + stmt->m_hSTMT, + (SQLTCHAR *) *sql, + sql.length()); + + if(ret == SQL_ERROR) { + Local objError = ODBC::GetSQLError( + SQL_HANDLE_STMT, + stmt->m_hSTMT, + (char *) "[node-odbc] Error in ODBCStatement::ExecuteDirectSync" + ); + + Nan::ThrowError(objError); + + info.GetReturnValue().Set(Nan::Null()); + } + else { + Local result[4]; + bool* canFreeHandle = new bool(false); + + result[0] = Nan::New(stmt->m_hENV); + result[1] = Nan::New(stmt->m_hDBC); + result[2] = Nan::New(stmt->m_hSTMT); + result[3] = Nan::New(canFreeHandle); + + Local js_result = Nan::NewInstance(Nan::New(ODBCResult::constructor), 4, result).ToLocalChecked(); + + info.GetReturnValue().Set(js_result); + } +} + +/* + * PrepareSync + * + */ + +NAN_METHOD(ODBCStatement::PrepareSync) { + DEBUG_PRINTF("ODBCStatement::PrepareSync\n"); + + Nan::HandleScope scope; + + REQ_STRO_ARG(0, sql); + + ODBCStatement* stmt = Nan::ObjectWrap::Unwrap(info.Holder()); + + SQLRETURN ret; + +#ifdef UNICODE + int sqlLen = sql->Length() + 1; + uint16_t* sql2 = (uint16_t *) malloc(sqlLen * sizeof(uint16_t)); + sql->Write(sql2); +#else + int sqlLen = sql->Utf8Length() + 1; + char* sql2 = (char *) malloc(sqlLen); + sql->WriteUtf8(sql2); +#endif + + ret = SQLPrepare( + stmt->m_hSTMT, + (SQLTCHAR *) sql2, + sqlLen); + + if (SQL_SUCCEEDED(ret)) { + info.GetReturnValue().Set(Nan::True()); + } + else { + Local objError = ODBC::GetSQLError( + SQL_HANDLE_STMT, + stmt->m_hSTMT, + (char *) "[node-odbc] Error in ODBCStatement::PrepareSync" + ); + + Nan::ThrowError(objError); + + info.GetReturnValue().Set(Nan::False()); + } +} + +/* + * Prepare + * + */ + +NAN_METHOD(ODBCStatement::Prepare) { + DEBUG_PRINTF("ODBCStatement::Prepare\n"); + + Nan::HandleScope scope; + + REQ_STRO_ARG(0, sql); + REQ_FUN_ARG(1, cb); + + ODBCStatement* stmt = Nan::ObjectWrap::Unwrap(info.Holder()); + + uv_work_t* work_req = (uv_work_t *) (calloc(1, sizeof(uv_work_t))); + + prepare_work_data* data = + (prepare_work_data *) calloc(1, sizeof(prepare_work_data)); + + data->cb = new Nan::Callback(cb); + +#ifdef UNICODE + data->sqlLen = sql->Length(); + data->sql = (uint16_t *) malloc((data->sqlLen * sizeof(uint16_t)) + sizeof(uint16_t)); + sql->Write((uint16_t *) data->sql); +#else + data->sqlLen = sql->Utf8Length(); 
+ data->sql = (char *) malloc(data->sqlLen +1); + sql->WriteUtf8((char *) data->sql); +#endif + + data->stmt = stmt; + + work_req->data = data; + + uv_queue_work( + uv_default_loop(), + work_req, + UV_Prepare, + (uv_after_work_cb)UV_AfterPrepare); + + stmt->Ref(); + + info.GetReturnValue().Set(Nan::Undefined()); +} + +void ODBCStatement::UV_Prepare(uv_work_t* req) { + DEBUG_PRINTF("ODBCStatement::UV_Prepare\n"); + + prepare_work_data* data = (prepare_work_data *)(req->data); + + DEBUG_PRINTF("ODBCStatement::UV_Prepare\n"); + //DEBUG_PRINTF("ODBCStatement::UV_Prepare m_hDBC=%X m_hDBC=%X m_hSTMT=%X\n", + // data->stmt->m_hENV, + // data->stmt->m_hDBC, + // data->stmt->m_hSTMT + //); + + SQLRETURN ret; + + ret = SQLPrepare( + data->stmt->m_hSTMT, + (SQLTCHAR *) data->sql, + data->sqlLen); + + data->result = ret; +} + +void ODBCStatement::UV_AfterPrepare(uv_work_t* req, int status) { + DEBUG_PRINTF("ODBCStatement::UV_AfterPrepare\n"); + + prepare_work_data* data = (prepare_work_data *)(req->data); + + DEBUG_PRINTF("ODBCStatement::UV_AfterPrepare\n"); + //DEBUG_PRINTF("ODBCStatement::UV_AfterPrepare m_hDBC=%X m_hDBC=%X m_hSTMT=%X\n", + // data->stmt->m_hENV, + // data->stmt->m_hDBC, + // data->stmt->m_hSTMT + //); + + Nan::HandleScope scope; + + //First thing, let's check if the execution of the query returned any errors + if(data->result == SQL_ERROR) { + ODBC::CallbackSQLError( + SQL_HANDLE_STMT, + data->stmt->m_hSTMT, + data->cb); + } + else { + Local info[2]; + + info[0] = Nan::Null(); + info[1] = Nan::True(); + + Nan::TryCatch try_catch; + + data->cb->Call( 2, info); + + if (try_catch.HasCaught()) { + Nan::FatalException(try_catch); + } + } + + data->stmt->Unref(); + delete data->cb; + + free(data->sql); + free(data); + free(req); +} + +/* + * BindSync + * + */ + +NAN_METHOD(ODBCStatement::BindSync) { + DEBUG_PRINTF("ODBCStatement::BindSync\n"); + + Nan::HandleScope scope; + + if ( !info[0]->IsArray() ) { + return Nan::ThrowTypeError("Argument 1 must be an Array"); + } + + ODBCStatement* stmt = Nan::ObjectWrap::Unwrap(info.Holder()); + + DEBUG_PRINTF("ODBCStatement::BindSync\n"); + //DEBUG_PRINTF("ODBCStatement::BindSync m_hDBC=%X m_hDBC=%X m_hSTMT=%X\n", + // stmt->m_hENV, + // stmt->m_hDBC, + // stmt->m_hSTMT + //); + + //if we previously had parameters, then be sure to free them + //before allocating more + if (stmt->paramCount) { + int count = stmt->paramCount; + stmt->paramCount = 0; + + Parameter prm; + + //free parameter memory + for (int i = 0; i < count; i++) { + if (prm = stmt->params[i], prm.ParameterValuePtr != NULL) { + switch (prm.ValueType) { + case SQL_C_WCHAR: free(prm.ParameterValuePtr); break; + case SQL_C_CHAR: free(prm.ParameterValuePtr); break; + case SQL_C_SBIGINT: delete (int64_t *)prm.ParameterValuePtr; break; + case SQL_C_DOUBLE: delete (double *)prm.ParameterValuePtr; break; + case SQL_C_BIT: delete (bool *)prm.ParameterValuePtr; break; + } + } + } + + free(stmt->params); + } + + stmt->params = ODBC::GetParametersFromArray( + Local::Cast(info[0]), + &stmt->paramCount); + + SQLRETURN ret = SQL_SUCCESS; + Parameter prm; + + for (int i = 0; i < stmt->paramCount; i++) { + prm = stmt->params[i]; + + /*DEBUG_PRINTF( + "ODBCStatement::BindSync - param[%i]: c_type=%i type=%i " + "buffer_length=%i size=%i length=%i &length=%X decimals=%i value=%s\n", + i, prm.ValueType, prm.ParameterType, prm.BufferLength, prm.ColumnSize, prm.length, + &stmt->params[i].StrLen_or_IndPtr, prm.DecimalDigits, prm.ParameterValuePtr + );*/ + + ret = SQLBindParameter( + stmt->m_hSTMT, 
//StatementHandle + i + 1, //ParameterNumber + SQL_PARAM_INPUT, //InputOutputType + prm.ValueType, + prm.ParameterType, + prm.ColumnSize, + prm.DecimalDigits, + prm.ParameterValuePtr, + prm.BufferLength, + &stmt->params[i].StrLen_or_IndPtr); + + if (ret == SQL_ERROR) { + break; + } + } + + if (SQL_SUCCEEDED(ret)) { + info.GetReturnValue().Set(Nan::True()); + } + else { + Local objError = ODBC::GetSQLError( + SQL_HANDLE_STMT, + stmt->m_hSTMT, + (char *) "[node-odbc] Error in ODBCStatement::BindSync" + ); + + Nan::ThrowError(objError); + + info.GetReturnValue().Set(Nan::False()); + } +} + +/* + * Bind + * + */ + +NAN_METHOD(ODBCStatement::Bind) { + DEBUG_PRINTF("ODBCStatement::Bind\n"); + + Nan::HandleScope scope; + + if ( !info[0]->IsArray() ) { + return Nan::ThrowError("Argument 1 must be an Array"); + } + + REQ_FUN_ARG(1, cb); + + ODBCStatement* stmt = Nan::ObjectWrap::Unwrap(info.Holder()); + + uv_work_t* work_req = (uv_work_t *) (calloc(1, sizeof(uv_work_t))); + + bind_work_data* data = + (bind_work_data *) calloc(1, sizeof(bind_work_data)); + + //if we previously had parameters, then be sure to free them + //before allocating more + if (stmt->paramCount) { + int count = stmt->paramCount; + stmt->paramCount = 0; + + Parameter prm; + + //free parameter memory + for (int i = 0; i < count; i++) { + if (prm = stmt->params[i], prm.ParameterValuePtr != NULL) { + switch (prm.ValueType) { + case SQL_C_WCHAR: free(prm.ParameterValuePtr); break; + case SQL_C_CHAR: free(prm.ParameterValuePtr); break; + case SQL_C_SBIGINT: delete (int64_t *)prm.ParameterValuePtr; break; + case SQL_C_DOUBLE: delete (double *)prm.ParameterValuePtr; break; + case SQL_C_BIT: delete (bool *)prm.ParameterValuePtr; break; + } + } + } + + free(stmt->params); + } + + data->stmt = stmt; + + DEBUG_PRINTF("ODBCStatement::Bind\n"); + //DEBUG_PRINTF("ODBCStatement::Bind m_hDBC=%X m_hDBC=%X m_hSTMT=%X\n", + // data->stmt->m_hENV, + // data->stmt->m_hDBC, + // data->stmt->m_hSTMT + //); + + data->cb = new Nan::Callback(cb); + + data->stmt->params = ODBC::GetParametersFromArray( + Local::Cast(info[0]), + &data->stmt->paramCount); + + work_req->data = data; + + uv_queue_work( + uv_default_loop(), + work_req, + UV_Bind, + (uv_after_work_cb)UV_AfterBind); + + stmt->Ref(); + + info.GetReturnValue().Set(Nan::Undefined()); +} + +void ODBCStatement::UV_Bind(uv_work_t* req) { + DEBUG_PRINTF("ODBCStatement::UV_Bind\n"); + + bind_work_data* data = (bind_work_data *)(req->data); + + DEBUG_PRINTF("ODBCStatement::UV_Bind\n"); + //DEBUG_PRINTF("ODBCStatement::UV_Bind m_hDBC=%X m_hDBC=%X m_hSTMT=%X\n", + // data->stmt->m_hENV, + // data->stmt->m_hDBC, + // data->stmt->m_hSTMT + //); + + SQLRETURN ret = SQL_SUCCESS; + Parameter prm; + + for (int i = 0; i < data->stmt->paramCount; i++) { + prm = data->stmt->params[i]; + + /*DEBUG_PRINTF( + "ODBCStatement::UV_Bind - param[%i]: c_type=%i type=%i " + "buffer_length=%i size=%i length=%i &length=%X decimals=%i value=%s\n", + i, prm.ValueType, prm.ParameterType, prm.BufferLength, prm.ColumnSize, prm.length, + &data->stmt->params[i].StrLen_or_IndPtr, prm.DecimalDigits, prm.ParameterValuePtr + );*/ + + ret = SQLBindParameter( + data->stmt->m_hSTMT, //StatementHandle + i + 1, //ParameterNumber + SQL_PARAM_INPUT, //InputOutputType + prm.ValueType, + prm.ParameterType, + prm.ColumnSize, + prm.DecimalDigits, + prm.ParameterValuePtr, + prm.BufferLength, + &data->stmt->params[i].StrLen_or_IndPtr); + + if (ret == SQL_ERROR) { + break; + } + } + + data->result = ret; +} + +void 
ODBCStatement::UV_AfterBind(uv_work_t* req, int status) { + DEBUG_PRINTF("ODBCStatement::UV_AfterBind\n"); + + bind_work_data* data = (bind_work_data *)(req->data); + + Nan::HandleScope scope; + + //an easy reference to the statment object + ODBCStatement* self = data->stmt->self(); + + //Check if there were errors + if(data->result == SQL_ERROR) { + ODBC::CallbackSQLError( + SQL_HANDLE_STMT, + self->m_hSTMT, + data->cb); + } + else { + Local info[2]; + + info[0] = Nan::Null(); + info[1] = Nan::True(); + + Nan::TryCatch try_catch; + + data->cb->Call( 2, info); + + if (try_catch.HasCaught()) { + Nan::FatalException(try_catch); + } + } + + self->Unref(); + delete data->cb; + + free(data); + free(req); +} + +/* + * CloseSync + */ + +NAN_METHOD(ODBCStatement::CloseSync) { + DEBUG_PRINTF("ODBCStatement::CloseSync\n"); + + Nan::HandleScope scope; + + OPT_INT_ARG(0, closeOption, SQL_DESTROY); + + ODBCStatement* stmt = Nan::ObjectWrap::Unwrap(info.Holder()); + + DEBUG_PRINTF("ODBCStatement::CloseSync closeOption=%i\n", + closeOption); + + if (closeOption == SQL_DESTROY) { + stmt->Free(); + } + else { + uv_mutex_lock(&ODBC::g_odbcMutex); + + SQLFreeStmt(stmt->m_hSTMT, closeOption); + + uv_mutex_unlock(&ODBC::g_odbcMutex); + } + + info.GetReturnValue().Set(Nan::True()); +} diff --git a/src/odbc_statement.h b/src/odbc_statement.h new file mode 100644 index 00000000..0aa5afa3 --- /dev/null +++ b/src/odbc_statement.h @@ -0,0 +1,134 @@ +/* + Copyright (c) 2013, Dan VerWeire + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ + +#ifndef _SRC_ODBC_STATEMENT_H +#define _SRC_ODBC_STATEMENT_H + +#include + +class ODBCStatement : public Nan::ObjectWrap { + public: + static Nan::Persistent constructor; + static void Init(v8::Handle exports); + + void Free(); + + protected: + ODBCStatement() {}; + + explicit ODBCStatement(HENV hENV, HDBC hDBC, HSTMT hSTMT): + Nan::ObjectWrap(), + m_hENV(hENV), + m_hDBC(hDBC), + m_hSTMT(hSTMT) {}; + + ~ODBCStatement(); + + //constructor +public: + static NAN_METHOD(New); + + //async methods + static NAN_METHOD(Execute); +protected: + static void UV_Execute(uv_work_t* work_req); + static void UV_AfterExecute(uv_work_t* work_req, int status); + +public: + static NAN_METHOD(ExecuteDirect); +protected: + static void UV_ExecuteDirect(uv_work_t* work_req); + static void UV_AfterExecuteDirect(uv_work_t* work_req, int status); + +public: + static NAN_METHOD(ExecuteNonQuery); +protected: + static void UV_ExecuteNonQuery(uv_work_t* work_req); + static void UV_AfterExecuteNonQuery(uv_work_t* work_req, int status); + +public: + static NAN_METHOD(Prepare); +protected: + static void UV_Prepare(uv_work_t* work_req); + static void UV_AfterPrepare(uv_work_t* work_req, int status); + +public: + static NAN_METHOD(Bind); +protected: + static void UV_Bind(uv_work_t* work_req); + static void UV_AfterBind(uv_work_t* work_req, int status); + + //sync methods +public: + static NAN_METHOD(CloseSync); + static NAN_METHOD(ExecuteSync); + static NAN_METHOD(ExecuteDirectSync); + static NAN_METHOD(ExecuteNonQuerySync); + static NAN_METHOD(PrepareSync); + static NAN_METHOD(BindSync); +protected: + + struct Fetch_Request { + Nan::Callback* callback; + ODBCStatement *objResult; + SQLRETURN result; + }; + + ODBCStatement *self(void) { return this; } + + protected: + HENV m_hENV; + HDBC m_hDBC; + HSTMT m_hSTMT; + + Parameter *params; + int paramCount; + + uint16_t *buffer; + int bufferLength; + Column *columns; + short colCount; +}; + +struct execute_direct_work_data { + Nan::Callback* cb; + ODBCStatement *stmt; + int result; + void *sql; + int sqlLen; +}; + +struct execute_work_data { + Nan::Callback* cb; + ODBCStatement *stmt; + int result; +}; + +struct prepare_work_data { + Nan::Callback* cb; + ODBCStatement *stmt; + int result; + void *sql; + int sqlLen; +}; + +struct bind_work_data { + Nan::Callback* cb; + ODBCStatement *stmt; + int result; +}; + +#endif diff --git a/src/strptime.c b/src/strptime.c new file mode 100644 index 00000000..cca482fd --- /dev/null +++ b/src/strptime.c @@ -0,0 +1,389 @@ +/*- + * Copyright (c) 1997, 1998, 2005, 2008 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code was contributed to The NetBSD Foundation by Klaus Klein. + * Heavily optimised by David Laight + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Inclusion in node-odbc note: + * + * This code was found here: http://social.msdn.microsoft.com/forums/en-US/vcgeneral/thread/25a654f9-b6b6-490a-8f36-c87483bb36b7 + * One user posted what looks to be a scaled down version of the NetBSD code + * but did not include any header with their work. Since it seems pretty obvious + * that the user took much of the code from NetBSD, that is why the NetBSD header + * is displayed above. + */ + +#include "strptime.h" + +static int conv_num(const char **, int *, int, int); +static int strncasecmp(char *s1, char *s2, size_t n); + +char * strptime(const char *buf, const char *fmt, struct tm *tm) +{ + char c; + const char *bp; + size_t len = 0; + int alt_format, i, split_year = 0; + + bp = buf; + + while ((c = *fmt) != '\0') + { + /* Clear `alternate' modifier prior to new conversion. */ + alt_format = 0; + + /* Eat up white-space. */ + if (isspace(c)) + { + while (isspace(*bp)) + bp++; + + fmt++; + continue; + } + + if ((c = *fmt++) != '%') + goto literal; + + +again: switch (c = *fmt++) + { + case '%': /* "%%" is converted to "%". */ + literal: + if (c != *bp++) + return (0); + break; + + /* + * "Alternative" modifiers. Just set the appropriate flag + * and start over again. + */ + case 'E': /* "%E?" alternative conversion modifier. */ + LEGAL_ALT(0); + alt_format |= ALT_E; + goto again; + + case 'O': /* "%O?" alternative conversion modifier. */ + LEGAL_ALT(0); + alt_format |= ALT_O; + goto again; + + /* + * "Complex" conversion rules, implemented through recursion. + */ + case 'c': /* Date and time, using the locale's format. */ + LEGAL_ALT(ALT_E); + if (!(bp = strptime(bp, "%x %X", tm))) + return (0); + break; + + case 'D': /* The date as "%m/%d/%y". */ + LEGAL_ALT(0); + if (!(bp = strptime(bp, "%m/%d/%y", tm))) + return (0); + break; + + case 'R': /* The time as "%H:%M". */ + LEGAL_ALT(0); + if (!(bp = strptime(bp, "%H:%M", tm))) + return (0); + break; + + case 'r': /* The time in 12-hour clock representation. */ + LEGAL_ALT(0); + if (!(bp = strptime(bp, "%I:%M:%S %p", tm))) + return (0); + break; + + case 'T': /* The time as "%H:%M:%S". */ + LEGAL_ALT(0); + if (!(bp = strptime(bp, "%H:%M:%S", tm))) + return (0); + break; + + case 'X': /* The time, using the locale's format. */ + LEGAL_ALT(ALT_E); + if (!(bp = strptime(bp, "%H:%M:%S", tm))) + return (0); + break; + + case 'x': /* The date, using the locale's format. */ + LEGAL_ALT(ALT_E); + if (!(bp = strptime(bp, "%m/%d/%y", tm))) + return (0); + break; + + /* + * "Elementary" conversion rules. + */ + case 'A': /* The day of week, using the locale's form. */ + case 'a': + LEGAL_ALT(0); + for (i = 0; i < 7; i++) + { + /* Full name. */ + len = strlen(day[i]); + if (strncasecmp((char *)(day[i]), (char *)bp, len) == 0) + break; + + /* Abbreviated name. */ + len = strlen(abday[i]); + if (strncasecmp((char *)(abday[i]), (char *)bp, len) == 0) + break; + } + + /* Nothing matched. 
*/ + if (i == 7) + return (0); + + tm->tm_wday = i; + bp += len; + break; + + case 'B': /* The month, using the locale's form. */ + case 'b': + case 'h': + LEGAL_ALT(0); + for (i = 0; i < 12; i++) + { + /* Full name. */ + + len = strlen(mon[i]); + if (strncasecmp((char *)(mon[i]), (char *)bp, len) == 0) + break; + + /* Abbreviated name. */ + len = strlen(abmon[i]); + if (strncasecmp((char *)(abmon[i]),(char *) bp, len) == 0) + break; + } + + /* Nothing matched. */ + if (i == 12) + return (0); + + tm->tm_mon = i; + bp += len; + break; + + case 'C': /* The century number. */ + LEGAL_ALT(ALT_E); + if (!(conv_num(&bp, &i, 0, 99))) + return (0); + + if (split_year) + { + tm->tm_year = (tm->tm_year % 100) + (i * 100); + } else { + tm->tm_year = i * 100; + split_year = 1; + } + break; + + case 'd': /* The day of month. */ + case 'e': + LEGAL_ALT(ALT_O); + if (!(conv_num(&bp, &tm->tm_mday, 1, 31))) + return (0); + break; + + case 'k': /* The hour (24-hour clock representation). */ + LEGAL_ALT(0); + /* FALLTHROUGH */ + case 'H': + LEGAL_ALT(ALT_O); + if (!(conv_num(&bp, &tm->tm_hour, 0, 23))) + return (0); + break; + + case 'l': /* The hour (12-hour clock representation). */ + LEGAL_ALT(0); + /* FALLTHROUGH */ + case 'I': + LEGAL_ALT(ALT_O); + if (!(conv_num(&bp, &tm->tm_hour, 1, 12))) + return (0); + if (tm->tm_hour == 12) + tm->tm_hour = 0; + break; + + case 'j': /* The day of year. */ + LEGAL_ALT(0); + if (!(conv_num(&bp, &i, 1, 366))) + return (0); + tm->tm_yday = i - 1; + break; + + case 'M': /* The minute. */ + LEGAL_ALT(ALT_O); + if (!(conv_num(&bp, &tm->tm_min, 0, 59))) + return (0); + break; + + case 'm': /* The month. */ + LEGAL_ALT(ALT_O); + if (!(conv_num(&bp, &i, 1, 12))) + return (0); + tm->tm_mon = i - 1; + break; + +// case 'p': /* The locale's equivalent of AM/PM. */ +// LEGAL_ALT(0); +// /* AM? */ +// if (strcasecmp(am_pm[0], bp) == 0) +// { +// if (tm->tm_hour > 11) +// return (0); +// +// bp += strlen(am_pm[0]); +// break; +// } +// /* PM? */ +// else if (strcasecmp(am_pm[1], bp) == 0) +// { +// if (tm->tm_hour > 11) +// return (0); +// +// tm->tm_hour += 12; +// bp += strlen(am_pm[1]); +// break; +// } +// +// /* Nothing matched. */ +// return (0); + + case 'S': /* The seconds. */ + LEGAL_ALT(ALT_O); + if (!(conv_num(&bp, &tm->tm_sec, 0, 61))) + return (0); + break; + + case 'U': /* The week of year, beginning on sunday. */ + case 'W': /* The week of year, beginning on monday. */ + LEGAL_ALT(ALT_O); + /* + * XXX This is bogus, as we can not assume any valid + * information present in the tm structure at this + * point to calculate a real value, so just check the + * range for now. + */ + if (!(conv_num(&bp, &i, 0, 53))) + return (0); + break; + + case 'w': /* The day of week, beginning on sunday. */ + LEGAL_ALT(ALT_O); + if (!(conv_num(&bp, &tm->tm_wday, 0, 6))) + return (0); + break; + + case 'Y': /* The year. */ + LEGAL_ALT(ALT_E); + if (!(conv_num(&bp, &i, 0, 9999))) + return (0); + + tm->tm_year = i - TM_YEAR_BASE; + break; + + case 'y': /* The year within 100 years of the epoch. */ + LEGAL_ALT(ALT_E | ALT_O); + if (!(conv_num(&bp, &i, 0, 99))) + return (0); + + if (split_year) + { + tm->tm_year = ((tm->tm_year / 100) * 100) + i; + break; + } + split_year = 1; + if (i <= 68) + tm->tm_year = i + 2000 - TM_YEAR_BASE; + else + tm->tm_year = i + 1900 - TM_YEAR_BASE; + break; + + /* + * Miscellaneous conversions. + */ + case 'n': /* Any kind of white-space. */ + case 't': + LEGAL_ALT(0); + while (isspace(*bp)) + bp++; + break; + + + default: /* Unknown/unsupported conversion. 
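Any directive not handled above aborts the parse and NULL is returned.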
*/ + return (0); + } + + + } + + /* LINTED functional specification */ + return ((char *)bp); +} + + +static int conv_num(const char **buf, int *dest, int llim, int ulim) +{ + int result = 0; + + /* The limit also determines the number of valid digits. */ + int rulim = ulim; + + if (**buf < '0' || **buf > '9') + return (0); + + do { + result *= 10; + result += *(*buf)++ - '0'; + rulim /= 10; + } while ((result * 10 <= ulim) && rulim && **buf >= '0' && **buf <= '9'); + + if (result < llim || result > ulim) + return (0); + + *dest = result; + return (1); +} + +int strncasecmp(char *s1, char *s2, size_t n) +{ + if (n == 0) + return 0; + + while (n-- != 0 && tolower(*s1) == tolower(*s2)) + { + if (n == 0 || *s1 == '\0' || *s2 == '\0') + break; + s1++; + s2++; + } + + return tolower(*(unsigned char *) s1) - tolower(*(unsigned char *) s2); +} diff --git a/src/strptime.h b/src/strptime.h new file mode 100644 index 00000000..ce0cbfaa --- /dev/null +++ b/src/strptime.h @@ -0,0 +1,47 @@ +#ifndef _STRPTIME_H +#define _STRPTIME_H + +#define ALT_E 0x01 +#define ALT_O 0x02 +//#define LEGAL_ALT(x) { if (alt_format & ~(x)) return (0); } +#define LEGAL_ALT(x) { ; } +#define TM_YEAR_BASE (1900) + +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +char * strptime(const char *buf, const char *fmt, struct tm *tm); + +#ifdef __cplusplus +} +#endif + +static const char *day[7] = { + "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", + "Friday", "Saturday" +}; + +static const char *abday[7] = { + "Sun","Mon","Tue","Wed","Thu","Fri","Sat" +}; + +static const char *mon[12] = { + "January", "February", "March", "April", "May", "June", "July", + "August", "September", "October", "November", "December" +}; + +static const char *abmon[12] = { + "Jan", "Feb", "Mar", "Apr", "May", "Jun", + "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" +}; +static const char *am_pm[2] = { + "AM", "PM" +}; + +#endif \ No newline at end of file diff --git a/test/bench-prepare-bind-execute-closeSync.js b/test/bench-prepare-bind-execute-closeSync.js new file mode 100644 index 00000000..d858fa09 --- /dev/null +++ b/test/bench-prepare-bind-execute-closeSync.js @@ -0,0 +1,60 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , iterations = 10000 + ; + +db.open(common.connectionString, function(err){ + if (err) { + console.error(err); + process.exit(1); + } + + issueQuery3(function () { + finish(); + }); +}); + +function issueQuery3(done) { + var count = 0 + , time = new Date().getTime(); + + var stmt = db.prepareSync('select ? 
as test'); + + for (var x = 0; x < iterations; x++) { + (function (x) { + stmt.bind([x], function (err) { + if (err) { + console.log(err); + return finish(); + } + + //console.log(x); + + stmt.execute(cb); + }); + })(x); + } + + function cb (err, result) { + if (err) { + console.error(err); + return finish(); + } + + //console.log(result.fetchAllSync()); + + result.closeSync(); + + if (++count == iterations) { + var elapsed = new Date().getTime() - time; + + console.log("%d queries issued in %d seconds, %d/sec : Prepare - Bind - Execute - CloseSync", count, elapsed/1000, Math.floor(count/(elapsed/1000))); + return done(); + } + } +} + +function finish() { + db.close(function () {}); +} diff --git a/test/bench-prepare-bind-executeNonQuery.js b/test/bench-prepare-bind-executeNonQuery.js new file mode 100644 index 00000000..f6c5d2a3 --- /dev/null +++ b/test/bench-prepare-bind-executeNonQuery.js @@ -0,0 +1,54 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , iterations = 10000 + ; + +db.open(common.connectionString, function(err){ + if (err) { + console.error(err); + process.exit(1); + } + + issueQuery2(function () { + finish(); + }); +}); + +function issueQuery2(done) { + var count = 0 + , time = new Date().getTime(); + + var stmt = db.prepareSync('select ? as test'); + + for (var x = 0; x < iterations; x++) { + (function (x) { + stmt.bind([x], function (err) { + if (err) { + console.log(err); + return finish(); + } + + stmt.executeNonQuery(cb); + }); + })(x); + } + + function cb (err, data) { + if (err) { + console.error(err); + return finish(); + } + + if (++count == iterations) { + var elapsed = new Date().getTime() - time; + + console.log("%d queries issued in %d seconds, %d/sec : Prepare - Bind - ExecuteNonQuery ", count, elapsed/1000, Math.floor(count/(elapsed/1000))); + return done(); + } + } +} + +function finish() { + db.close(function () {}); +} diff --git a/test/bench-prepare-bindSync-execute-closeSync.js b/test/bench-prepare-bindSync-execute-closeSync.js new file mode 100644 index 00000000..cbf0f670 --- /dev/null +++ b/test/bench-prepare-bindSync-execute-closeSync.js @@ -0,0 +1,50 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , iterations = 10000 + ; + +db.open(common.connectionString, function(err){ + if (err) { + console.error(err); + process.exit(1); + } + + issueQuery3(function () { + finish(); + }); +}); + +function issueQuery3(done) { + var count = 0 + , time = new Date().getTime(); + + var stmt = db.prepareSync('select ? 
as test'); + + for (var x = 0; x < iterations; x++) { + (function (x) { + stmt.bindSync([x]); + stmt.execute(cb); + })(x); + } + + function cb (err, result) { + if (err) { + console.error(err); + return finish(); + } + + result.closeSync(); + + if (++count == iterations) { + var elapsed = new Date().getTime() - time; + + console.log("%d queries issued in %d seconds, %d/sec : Prepare - Bind - Execute - CloseSync", count, elapsed/1000, Math.floor(count/(elapsed/1000))); + return done(); + } + } +} + +function finish() { + db.close(function () {}); +} diff --git a/test/bench-prepare-bindSync-executeNonQuery.js b/test/bench-prepare-bindSync-executeNonQuery.js new file mode 100644 index 00000000..f26da824 --- /dev/null +++ b/test/bench-prepare-bindSync-executeNonQuery.js @@ -0,0 +1,48 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , iterations = 10000 + ; + +db.open(common.connectionString, function(err){ + if (err) { + console.error(err); + process.exit(1); + } + + issueQuery2(function () { + finish(); + }); +}); + +function issueQuery2(done) { + var count = 0 + , time = new Date().getTime(); + + var stmt = db.prepareSync('select ? as test'); + + for (var x = 0; x < iterations; x++) { + (function (x) { + stmt.bindSync([x]); + stmt.executeNonQuery(cb); + })(x); + } + + function cb (err, data) { + if (err) { + console.error(err); + return finish(); + } + + if (++count == iterations) { + var elapsed = new Date().getTime() - time; + + console.log("%d queries issued in %d seconds, %d/sec : Prepare - Bind - ExecuteNonQuery ", count, elapsed/1000, Math.floor(count/(elapsed/1000))); + return done(); + } + } +} + +function finish() { + db.close(function () {}); +} diff --git a/test/bench-prepare-execute-closeSync.js b/test/bench-prepare-execute-closeSync.js new file mode 100644 index 00000000..c4d1e53c --- /dev/null +++ b/test/bench-prepare-execute-closeSync.js @@ -0,0 +1,51 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , iterations = 10000 + ; + +db.open(common.connectionString, function(err){ + if (err) { + console.error(err); + process.exit(1); + } + + issueQuery3(function () { + finish(); + }); +}); + +function issueQuery3(done) { + var count = 0 + , time = new Date().getTime(); + + var stmt = db.prepareSync('select ? as test'); + + for (var x = 0; x < iterations; x++) { + (function (x) { + stmt.execute([x], cb); + })(x); + } + + function cb (err, result) { + if (err) { + console.error(err); + return finish(); + } + + //console.log(result.fetchAllSync()); + + result.closeSync(); + + if (++count == iterations) { + var elapsed = new Date().getTime() - time; + + console.log("%d queries issued in %d seconds, %d/sec : Prepare - Execute - CloseSync", count, elapsed/1000, Math.floor(count/(elapsed/1000))); + return done(); + } + } +} + +function finish() { + db.close(function () {}); +} diff --git a/test/bench-prepare-executeNonQuery.js b/test/bench-prepare-executeNonQuery.js new file mode 100644 index 00000000..14b9320a --- /dev/null +++ b/test/bench-prepare-executeNonQuery.js @@ -0,0 +1,47 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , iterations = 10000 + ; + +db.open(common.connectionString, function(err){ + if (err) { + console.error(err); + process.exit(1); + } + + issueQuery2(function () { + finish(); + }); +}); + +function issueQuery2(done) { + var count = 0 + , time = new Date().getTime(); + + var stmt = db.prepareSync('select ? 
as test'); + + for (var x = 0; x < iterations; x++) { + (function (x) { + stmt.executeNonQuery([x], cb); + })(x); + } + + function cb (err, data) { + if (err) { + console.error(err); + return finish(); + } + + if (++count == iterations) { + var elapsed = new Date().getTime() - time; + + console.log("%d queries issued in %d seconds, %d/sec : Prepare - ExecuteNonQuery ", count, elapsed/1000, Math.floor(count/(elapsed/1000))); + return done(); + } + } +} + +function finish() { + db.close(function () {}); +} diff --git a/test/bench-prepare-not.js b/test/bench-prepare-not.js new file mode 100644 index 00000000..8596da49 --- /dev/null +++ b/test/bench-prepare-not.js @@ -0,0 +1,43 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , iterations = 10000 + ; + +db.open(common.connectionString, function(err){ + if (err) { + console.error(err); + process.exit(1); + } + + issueQuery1(function () { + finish(); + }); +}); + +function issueQuery1(done) { + var count = 0 + , time = new Date().getTime(); + + for (var x = 0; x < iterations; x++) { + db.query("select 1 + ? as test", [1], cb); + } + + function cb (err, data) { + if (err) { + console.error(err); + return finish(); + } + + if (++count == iterations) { + var elapsed = new Date().getTime() - time; + + console.log("%d queries issued in %d seconds, %d/sec : Query", count, elapsed/1000, Math.floor(count/(elapsed/1000))); + return done(); + } + } +} + +function finish() { + db.close(function () {}); +} diff --git a/test/bench-query-fetch-parameters.js b/test/bench-query-fetch-parameters.js new file mode 100644 index 00000000..746ee5ef --- /dev/null +++ b/test/bench-query-fetch-parameters.js @@ -0,0 +1,44 @@ +var common = require("./common") +, odbc = require("../") +, db = new odbc.Database(); + +db.open(common.connectionString, function(err){ + if (err) { + console.error(err); + process.exit(1); + } + + issueQuery(); +}); + +function issueQuery() { + var count = 0 + , iterations = 10000 + , time = new Date().getTime(); + + function iteration() { + db.query("select ? + ?, ? 
as test", [Math.floor(Math.random() * 1000), Math.floor(Math.random() * 1000), "This is a string"], cb); + } + + iteration() + + function cb (err, result) { + if (err) { + console.error("query: ", err); + return finish(); + } + + if (++count == iterations) { + var elapsed = new Date().getTime() - time; + + console.log("%d queries issued in %d seconds, %d/sec", count, elapsed/1000, Math.floor(count/(elapsed/1000))); + return finish(); + } else { + iteration(); + } + } + + function finish() { + db.close(function () {}); + } +} diff --git a/test/bench-query-fetch.js b/test/bench-query-fetch.js new file mode 100644 index 00000000..241589f8 --- /dev/null +++ b/test/bench-query-fetch.js @@ -0,0 +1,63 @@ +var common = require("./common") +, odbc = require("../") +, db = new odbc.Database(); + +db.open(common.connectionString, function(err){ + if (err) { + console.error(err); + process.exit(1); + } + + issueQuery(); +}); + +function issueQuery() { + var count = 0 + , iterations = 10000 + , time = new Date().getTime(); + + function iteration() { + db.queryResult("select 1 + 1 as test", cb); + } + + iteration() + + function cb (err, result) { + if (err) { + console.error("queryResult: ", err); + return finish(); + } + + fetchAll(result); + } + + function fetchAll(rs) { + rs.fetch(function (err, data) { + if (err) { + console.error(err); + return finish(); + } + + //if data is null, then no more data + if (!data) { + rs.closeSync(); + + if (++count == iterations) { + var elapsed = new Date().getTime() - time; + + console.log("%d queries issued in %d seconds, %d/sec", count, elapsed/1000, Math.floor(count/(elapsed/1000))); + return finish(); + } else { + iteration() + } + } + else { + fetchAll(rs); + } + }); + } + + function finish() { + db.close(function () {}); + } +} diff --git a/test/bench-query-fetchAll.js b/test/bench-query-fetchAll.js new file mode 100644 index 00000000..30816e82 --- /dev/null +++ b/test/bench-query-fetchAll.js @@ -0,0 +1,53 @@ +var common = require("./common") +, odbc = require("../") +, db = new odbc.Database(); + +db.open(common.connectionString, function(err){ + if (err) { + console.error(err); + process.exit(1); + } + + issueQuery(); +}); + +function issueQuery() { + var count = 0 + , iterations = 10000 + , time = new Date().getTime(); + + function iteration() { + db.queryResult("select 1 + 1 as test", cb); + } + + iteration(); + + function cb (err, result) { + if (err) { + console.error(err); + return finish(); + } + + result.fetchAll(function (err, data) { + if (err) { + console.error(err); + return finish(); + } + + result.closeSync(); + + if (++count == iterations) { + var elapsed = new Date().getTime() - time; + + console.log("%d queries issued in %d seconds, %d/sec", count, elapsed/1000, Math.floor(count/(elapsed/1000))); + return finish(); + } else { + iteration(); + } + }); + } + + function finish() { + db.close(function () {}); + } +} diff --git a/test/bench-query-fetchAllSync.js b/test/bench-query-fetchAllSync.js new file mode 100644 index 00000000..e5390777 --- /dev/null +++ b/test/bench-query-fetchAllSync.js @@ -0,0 +1,43 @@ +var common = require("./common") +, odbc = require("../") +, db = new odbc.Database(); + +db.open(common.connectionString, function(err){ + if (err) { + console.error(err); + process.exit(1); + } + + issueQuery(); +}); + +function issueQuery() { + var count = 0 + , iterations = 10000 + , time = new Date().getTime(); + + for (var x = 0; x < iterations; x++) { + db.queryResult("select 1 + 1 as test", cb); + } + + function cb (err, result) { 
+ if (err) { + console.error(err); + return finish(); + } + + var data = result.fetchAllSync(); + result.closeSync(); + + if (++count == iterations) { + var elapsed = new Date().getTime() - time; + + console.log("%d queries issued in %d seconds, %d/sec", count, elapsed/1000, Math.floor(count/(elapsed/1000))); + return finish(); + } + } + + function finish() { + db.close(function () {}); + } +} \ No newline at end of file diff --git a/test/bench-query.js b/test/bench-query.js new file mode 100644 index 00000000..c4e4c90a --- /dev/null +++ b/test/bench-query.js @@ -0,0 +1,40 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database(); + +db.open(common.connectionString, function(err){ + if (err) { + console.error(err); + process.exit(1); + } + + issueQuery(); +}); + +function issueQuery() { + var count = 0 + , iterations = 10000 + , time = new Date().getTime(); + + for (var x = 0; x < iterations; x++) { + db.query("select 1 + 1 as test", cb); + } + + function cb (err, data) { + if (err) { + console.error(err); + return finish(); + } + + if (++count == iterations) { + var elapsed = new Date().getTime() - time; + + console.log("%d queries issued in %d seconds, %d/sec", count, elapsed/1000, Math.floor(count/(elapsed/1000))); + return finish(); + } + } +} + +function finish() { + db.close(function () {}); +} diff --git a/test/bench-querySync-fetchArray.js b/test/bench-querySync-fetchArray.js new file mode 100644 index 00000000..2674dd18 --- /dev/null +++ b/test/bench-querySync-fetchArray.js @@ -0,0 +1,29 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database({ fetchMode : odbc.FETCH_ARRAY }); + +db.open(common.connectionString, function(err){ + if (err) { + console.error(err); + process.exit(1); + } + + issueQuery(); +}); + +function issueQuery() { + var count = 0 + , iterations = 10000 + , time = new Date().getTime(); + + for (var x = 0; x < iterations; x++) { + var data = db.querySync("select 1 + 1 as test"); + count += 1; + } + + var elapsed = new Date().getTime() - time; + + console.log("%d queries issued in %d seconds, %d/sec", count, elapsed/1000, Math.floor(count/(elapsed/1000))); + + db.close(function () {}); +} \ No newline at end of file diff --git a/test/bench-querySync-parameters.js b/test/bench-querySync-parameters.js new file mode 100644 index 00000000..dca0f108 --- /dev/null +++ b/test/bench-querySync-parameters.js @@ -0,0 +1,27 @@ +var common = require("./common") +, odbc = require("../") +, db = new odbc.Database(); + +db.open(common.connectionString, function(err){ + if (err) { + console.error(err); + process.exit(1); + } + + issueQuery(); +}); + +function issueQuery() { + var count = 0 + , iterations = 10000 + , time = new Date().getTime(); + + for (var x = 0; x < iterations; x++) { + db.querySync("select ? + ?, ? 
as test", [Math.floor(Math.random() * 1000), Math.floor(Math.random() * 1000), "This is a string"]); + } + + var elapsed = new Date().getTime() - time; + console.log("%d queries issued in %d seconds, %d/sec", iterations, elapsed/1000, Math.floor(iterations/(elapsed/1000))); + + db.close(function () {}); +} diff --git a/test/bench-querySync.js b/test/bench-querySync.js new file mode 100644 index 00000000..8a8a8948 --- /dev/null +++ b/test/bench-querySync.js @@ -0,0 +1,29 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database(); + +db.open(common.connectionString, function(err){ + if (err) { + console.error(err); + process.exit(1); + } + + issueQuery(); +}); + +function issueQuery() { + var count = 0 + , iterations = 10000 + , time = new Date().getTime(); + + for (var x = 0; x < iterations; x++) { + var data = db.querySync("select 1 + 1 as test"); + count += 1; + } + + var elapsed = new Date().getTime() - time; + + console.log("%d queries issued in %d seconds, %d/sec", count, elapsed/1000, Math.floor(count/(elapsed/1000))); + + db.close(function () { }); +} \ No newline at end of file diff --git a/test/common.js b/test/common.js index 14d94230..552cfae8 100644 --- a/test/common.js +++ b/test/common.js @@ -1 +1,57 @@ -module.exports.connectionString = "DRIVER={SQLite};DATABASE=data/sqlite-test.db"; +var odbc = require("../"); +//odbc.library = '/usr/lib/odbc/libsqlite3odbc-0.91'; +//odbc.library = '/usr/lib/x86_64-linux-gnu/odbc/libtdsodbc'; +//odbc.library = '/opt/sqlncli-11.0.1790.0/lib64/libsqlncli-11.0'; + +exports.connectionString = "DRIVER={SQLite3};DATABASE=data/sqlite-test.db"; +exports.title = "Sqlite3"; +exports.dialect = "sqlite"; +exports.user = ""; + +if (process.argv.length === 3) { + exports.connectionString = process.argv[2]; +} + +exports.connectionObject = { + DRIVER : "{SQLITE3}", + DATABASE : "data/sqlite-test.db" +}; + +try { + exports.testConnectionStrings = require('./config.testConnectionStrings.json'); +} +catch (e) { + exports.testConnectionStrings = [{ title : exports.title, connectionString : exports.connectionString, dialect : exports.dialect }]; +} + +try { + exports.benchConnectionStrings = require('./config.benchConnectionStrings.json'); +} +catch (e) { + exports.benchConnectionStrings = [{ title : exports.title, connectionString : exports.connectionString, dialect : exports.dialect }]; +} + +if (process.argv.length === 3) { + //look through the testConnectionStrings to see if there is a title that matches + //what was requested. 
+ var lookup = process.argv[2]; + + exports.testConnectionStrings.forEach(function (connectionString) { + if (connectionString && (connectionString.title == lookup || connectionString.connectionString == lookup)) { + exports.connectionString = connectionString.connectionString; + exports.dialect = connectionString.dialect; + exports.user = connectionString.user; + } + }); +} + +exports.databaseName = "test"; +exports.tableName = "NODE_ODBC_TEST_TABLE"; + +exports.dropTables = function (db, cb) { + db.query("drop table " + exports.tableName, cb); +}; + +exports.createTables = function (db, cb) { + db.query("create table " + exports.tableName + " (COLINT INTEGER, COLDATETIME DATETIME, COLTEXT TEXT)", cb); +}; diff --git a/test/config.benchConnectionStrings.json b/test/config.benchConnectionStrings.json new file mode 100644 index 00000000..437710d6 --- /dev/null +++ b/test/config.benchConnectionStrings.json @@ -0,0 +1,6 @@ +[ + { "title" : "Sqlite3", "connectionString" : "DRIVER={SQLite3};DATABASE=data/sqlite-test.db" } + , { "title" : "MySQL-Local", "connectionString" : "DRIVER={MySQL};DATABASE=test;HOST=localhost;USER=test;" } + , { "title" : "MSSQL-FreeTDS-Remote", "connectionString" : "DRIVER={FreeTDS};SERVERNAME=sql2;DATABASE=test;UID=test;PWD=test;AutoTranslate=yes" } + , { "title" : "MSSQL-NativeCLI-Remote", "connectionString" : "DRIVER={SQL Server Native Client 11.0};SERVER=sql2;DATABASE=test;UID=test;PWD=test;" } +] diff --git a/test/config.testConnectionStrings.json b/test/config.testConnectionStrings.json new file mode 100644 index 00000000..1954bfae --- /dev/null +++ b/test/config.testConnectionStrings.json @@ -0,0 +1,6 @@ +[ + { "title" : "Sqlite3", "connectionString" : "DRIVER={SQLite3};DATABASE=data/sqlite-test.db", "dialect" : "sqlite", "user": "" } + , { "title" : "MySQL-Local", "connectionString" : "DRIVER={MySQL};DATABASE=test;HOST=localhost;SOCKET=/var/run/mysqld/mysqld.sock;USER=test;", "dialect" : "mysql", "user" : "test" } + , { "title" : "MSSQL-FreeTDS-Remote", "connectionString" : "DRIVER={FreeTDS};SERVERNAME=sql2;DATABASE=test;UID=test;PWD=test;AutoTranslate=yes;TEXTSIZE=10000000", "dialect" : "mssql", "user" : "test" } + , { "title" : "MSSQL-NativeCLI-Remote", "connectionString" : "DRIVER={SQL Server Native Client 11.0};SERVER=sql2;DATABASE=test;UID=test;PWD=test;", "dialect": "mssql", "user" : "test" } +] diff --git a/test/data/.gitignore b/test/data/.gitignore new file mode 100644 index 00000000..f1450df8 --- /dev/null +++ b/test/data/.gitignore @@ -0,0 +1 @@ +sqlite-test.db diff --git a/test/data/sqlite-test.db b/test/data/sqlite-test.db deleted file mode 100644 index 8136db4d..00000000 Binary files a/test/data/sqlite-test.db and /dev/null differ diff --git a/test/disabled/bench-insert.js b/test/disabled/bench-insert.js new file mode 100644 index 00000000..f1538649 --- /dev/null +++ b/test/disabled/bench-insert.js @@ -0,0 +1,65 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database(); + +db.open(common.connectionString, function(err){ + if (err) { + console.error(err); + process.exit(1); + } + + createTable(); +}); + +function createTable() { + db.query("create table bench_insert (str varchar(50))", function (err) { + if (err) { + console.error(err); + return finish(); + } + + return insertData(); + }); +} + +function dropTable() { + db.query("drop table bench_insert", function (err) { + if (err) { + console.error(err); + return finish(); + } + + return finish(); + }); +} + +function insertData() { + var count = 0 + , iterations 
= 10000 + , time = new Date().getTime(); + + for (var x = 0; x < iterations; x++) { + db.query("insert into bench_insert (str) values ('testing')", cb); + + } + + function cb (err) { + if (err) { + console.error(err); + return finish(); + } + + if (++count == iterations) { + var elapsed = new Date().getTime() - time; + + console.log("%d records inserted in %d seconds, %d/sec", iterations, elapsed/1000, iterations/(elapsed/1000)); + return dropTable(); + } + } +} + +function finish() { + db.close(function () { + console.log("connection closed"); + }); +} diff --git a/test/test-issue-13.js b/test/disabled/test-issue-13.js similarity index 100% rename from test/test-issue-13.js rename to test/disabled/test-issue-13.js diff --git a/test/disabled/test-prepare-bind-executeNonQuery.js b/test/disabled/test-prepare-bind-executeNonQuery.js new file mode 100644 index 00000000..4f5aa835 --- /dev/null +++ b/test/disabled/test-prepare-bind-executeNonQuery.js @@ -0,0 +1,63 @@ +var common = require("./common") + , odbc = require("../") + , assert = require("assert") + , db = new odbc.Database() + , iterations = 100000 + ; + +db.open(common.connectionString, function(err){ + if (err) { + console.error(err); + process.exit(1); + } + + issueQuery2(function () { + finish(); + }); +}); + +function issueQuery2(done) { + var count = 0 + , time = new Date().getTime(); + + var stmt = db.prepareSync('select ? as test'); + + for (var x = 0; x < iterations; x++) { + (function (x) { + stmt.bind([x], function (err) { + if (err) { + console.log(err); + return finish(); + } + + stmt.executeNonQuery(function (err, result) { + cb(err, result, x); + }); + }); + })(x); + } + + function cb (err, data, x) { + if (err) { + console.error(err); + return finish(); + } + + //TODO: there's nothing to assert in this case. + //we actually need to insert data and then get + //the data back out and then assert. + + if (++count == iterations) { + var elapsed = new Date().getTime() - time; + + console.log("%d queries issued in %d seconds, %d/sec : Prepare - Bind - ExecuteNonQuery ", count, elapsed/1000, Math.floor(count/(elapsed/1000))); + return done(); + } + } +} + +function finish() { + db.close(function () { + console.log("connection closed"); + }); +} diff --git a/test/disabled/test-prepare-bindSync-executeNonQuery.js b/test/disabled/test-prepare-bindSync-executeNonQuery.js new file mode 100644 index 00000000..40aad267 --- /dev/null +++ b/test/disabled/test-prepare-bindSync-executeNonQuery.js @@ -0,0 +1,50 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , iterations = 100000 + ; + +db.open(common.connectionString, function(err){ + if (err) { + console.error(err); + process.exit(1); + } + + issueQuery2(function () { + finish(); + }); +}); + +function issueQuery2(done) { + var count = 0 + , time = new Date().getTime(); + + var stmt = db.prepareSync('select ? 
as test'); + + for (var x = 0; x < iterations; x++) { + (function (x) { + stmt.bindSync([x]); + stmt.executeNonQuery(cb); + })(x); + } + + function cb (err, data) { + if (err) { + console.error(err); + return finish(); + } + + if (++count == iterations) { + var elapsed = new Date().getTime() - time; + + console.log("%d queries issued in %d seconds, %d/sec : Prepare - Bind - ExecuteNonQuery ", count, elapsed/1000, Math.floor(count/(elapsed/1000))); + return done(); + } + } +} + +function finish() { + db.close(function () { + console.log("connection closed"); + }); +} diff --git a/test/odbc-bench.c b/test/odbc-bench.c new file mode 100644 index 00000000..ff912e5d --- /dev/null +++ b/test/odbc-bench.c @@ -0,0 +1,123 @@ +/* + Copyright (c) 2012, Dan VerWeire + Copyright (c) 2011, Lee Smith + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ + +#include +#include +#include +#include +#include +#include +#include + +#define MAX_FIELD_SIZE 1024 +#define MAX_VALUE_SIZE 1048576 + + + +int main() { + HENV m_hEnv; + HDBC m_hDBC; + HSTMT m_hStmt; + SQLRETURN ret; + SQLUSMALLINT canHaveMoreResults; + //SQLCHAR outstr[1024]; + //SQLSMALLINT outstrlen; + + if( SQL_SUCCEEDED(SQLAllocEnv( &m_hEnv )) ) { + + if( SQL_SUCCEEDED(SQLAllocHandle( SQL_HANDLE_DBC, m_hEnv, &m_hDBC )) ) { + SQLSetConnectOption( m_hDBC, SQL_LOGIN_TIMEOUT,5 ); + + ret = SQLDriverConnect( + m_hDBC, + NULL, + "DRIVER={MySQL};SERVER=localhost;USER=test;PASSWORD=;DATABASE=test;", + SQL_NTS, + NULL,//outstr, + 0,//sizeof(outstr), + NULL,//&outstrlen, + SQL_DRIVER_NOPROMPT + ); + + if( SQL_SUCCEEDED(ret) ) { + int iterations = 10000; + int i = 0; + struct timeb start; + + ftime(&start); + + for (i =0 ; i <= iterations; i ++) { + SQLAllocHandle(SQL_HANDLE_STMT, m_hDBC, &m_hStmt); + + SQLExecDirect(m_hStmt, "select 1 + 1 as test;", SQL_NTS); + + while ( SQL_SUCCEEDED(SQLFetch(m_hStmt) )) { + //printf("sql query succeeded\n"); + } + + SQLFreeHandle(SQL_HANDLE_STMT, m_hStmt); + } + + struct timeb stop; + ftime(&stop); + + double elapsed = ((stop.time * 1000 + stop.millitm) - (start.time * 1000 + start.millitm)); + + printf("%d queries issued in %f seconds, %f/sec\n", iterations, (double) elapsed / 1000, iterations/((double) elapsed / 1000)); + } + else { + printf("here3\n"); + printError("SQLDriverConnect", m_hDBC, SQL_HANDLE_DBC); + } + } + else { + printError("SQLAllocHandle - dbc", m_hEnv, SQL_HANDLE_ENV); + } + } + else { + printError("SQLAllocHandle - env", m_hEnv, SQL_HANDLE_ENV); + } + + //SQLFreeHandle(SQL_HANDLE_DBC, m_hDBC); + //SQLFreeHandle(SQL_HANDLE_ENV, m_hEnv); + + return 0; +} + +void printError(const char *fn, SQLHANDLE handle, SQLSMALLINT type) +{ + SQLINTEGER i = 0; + SQLINTEGER native; + SQLCHAR state[ 7 ]; + SQLCHAR text[256]; + SQLSMALLINT len; + SQLRETURN ret; + + printf( + "\n" + "The driver reported the following diagnostics whilst running " + "%s\n\n", + fn + 
); + + do { + ret = SQLGetDiagRec(type, handle, ++i, state, &native, text, sizeof(text), &len ); + if (SQL_SUCCEEDED(ret)) + printf("%s:%ld:%ld:%s\n", state, (long int) i, (long int) native, text); + } + while( ret == SQL_SUCCESS ); +} diff --git a/test/run-bench.js b/test/run-bench.js new file mode 100644 index 00000000..1e5181a6 --- /dev/null +++ b/test/run-bench.js @@ -0,0 +1,79 @@ +var fs = require("fs") + , common = require('./common.js') + , spawn = require("child_process").spawn + , requestedBench = null + , files + ; + +if (process.argv.length === 3) { + requestedBench = process.argv[2]; +} + +var connectionStrings = common.benchConnectionStrings; + +//check to see if the requested test is actually a driver to benchmark +if (requestedBench) { + connectionStrings.forEach(function (connectionString) { + if (requestedBench == connectionString.title) { + connectionStrings = [connectionString]; + requestedBench = null; + } + }); +} + +doNextConnectionString(); + +function doBench(file, connectionString) { + var bench = spawn("node", ['--expose_gc',file, connectionString.connectionString]); + + process.stdout.write("Running \033[01;33m" + file.replace(/\.js$/, "") + "\033[01;0m with [\033[01;29m" + connectionString.title + "\033[01;0m] : "); + + bench.on("exit", function (code, signal) { + doNextBench(connectionString); + }); + + bench.stderr.on("data", function (data) { + process.stderr.write(data); + }); + + bench.stdout.on("data", function (data) { + process.stdout.write(data); + }); +} + +function doNextBench(connectionString) { + if (files.length) { + var benchFile = files.shift(); + + doBench(benchFile, connectionString); + } + else { + //we're done with this connection string, display results and exit accordingly + doNextConnectionString(); + } +} + +function doNextConnectionString() { + if (connectionStrings.length) { + var connectionString = connectionStrings.shift(); + + if (requestedBench) { + files = [requestedBench]; + } + else { + //re-read files + files = fs.readdirSync("./"); + + files = files.filter(function (file) { + return (/^bench-/.test(file)) ? true : false; + }); + + files.sort(); + } + + doNextBench(connectionString); + } + else { + console.log("Done"); + } +} diff --git a/test/run-tests.js b/test/run-tests.js new file mode 100644 index 00000000..4a82a9ac --- /dev/null +++ b/test/run-tests.js @@ -0,0 +1,119 @@ +var fs = require("fs") + , common = require('./common.js') + , spawn = require("child_process").spawn + , errorCount = 0 + , testCount = 0 + , testTimeout = 5000 + , requestedTest = null + , files + ; + +var filesDisabled = fs.readdirSync("./disabled"); + +if (filesDisabled.length) { + console.log("\n\033[01;31mWarning\033[01;0m : there are %s disabled tests\n", filesDisabled.length); +} + +if (process.argv.length === 3) { + requestedTest = process.argv[2]; +} + +var connectionStrings = common.testConnectionStrings; + +//check to see if the requested test is actually a driver to test +if (requestedTest) { + connectionStrings.forEach(function (connectionString) { + if (requestedTest == connectionString.title) { + connectionStrings = [connectionString]; + requestedTest = null; + } + }); +} + +doNextConnectionString(); + + +function doTest(file, connectionString) { + var test = spawn("node", ['--expose_gc',file, connectionString.connectionString]) + , timer = null + , timedOut = false; + ; + + process.stdout.write("Running test for [\033[01;29m" + connectionString.title + "\033[01;0m] : " + file.replace(/\.js$/, "")); + process.stdout.write(" ... 
"); + + testCount += 1; + + //TODO: process the following if some flag is set + //test.stdout.pipe(process.stdout); + //test.stderr.pipe(process.stderr); + + test.on("exit", function (code, signal) { + clearTimeout(timer); + + if (code != 0) { + errorCount += 1; + + process.stdout.write("\033[01;31mfail \033[01;0m "); + + if (timedOut) { + process.stdout.write("(Timed Out)"); + } + } + else { + process.stdout.write("\033[01;32msuccess \033[01;0m "); + } + + process.stdout.write("\n"); + + doNextTest(connectionString); + }); + + var timer = setTimeout(function () { + timedOut = true; + test.kill(); + },testTimeout); +} + +function doNextTest(connectionString) { + if (files.length) { + var testFile = files.shift(); + + doTest(testFile, connectionString); + } + else { + //we're done with this connection string, display results and exit accordingly + doNextConnectionString(); + } +} + +function doNextConnectionString() { + if (connectionStrings.length) { + var connectionString = connectionStrings.shift(); + + if (requestedTest) { + files = [requestedTest]; + } + else { + //re-read files + files = fs.readdirSync("./"); + + files = files.filter(function (file) { + return (/^test-/.test(file)) ? true : false; + }); + + files.sort(); + } + + doNextTest(connectionString); + } + else { + if (errorCount) { + console.log("\nResults : %s of %s tests failed.\n", errorCount, testCount); + process.exit(errorCount); + } + else { + console.log("Results : All tests were successful."); + } + } +} diff --git a/test/sql-cli.js b/test/sql-cli.js index d8c73f4d..50ce0688 100644 --- a/test/sql-cli.js +++ b/test/sql-cli.js @@ -1,5 +1,5 @@ var common = require("./common") - , odbc = require("../odbc.js") + , odbc = require("../") , db = new odbc.Database(); db.open(common.connectionString, function(err) diff --git a/test/test-bad-connection-string.js b/test/test-bad-connection-string.js new file mode 100644 index 00000000..67b4e63d --- /dev/null +++ b/test/test-bad-connection-string.js @@ -0,0 +1,27 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert") + ; + +assert.throws(function () { + db.openSync("this is wrong"); +}); + +assert.equal(db.connected, false); + +db.open("this is wrong", function(err) { + console.log(err); + + assert.deepEqual(err, { + error: '[node-odbc] SQL_ERROR', + message: '[unixODBC][Driver Manager]Data source name not found, and no default driver specified', + state: 'IM002' + , errors : [{ + message: '[unixODBC][Driver Manager]Data source name not found, and no default driver specified', + state: 'IM002' + }] + }); + + assert.equal(db.connected, false); +}); diff --git a/test/test-binding-connection-loginTimeout.js b/test/test-binding-connection-loginTimeout.js new file mode 100644 index 00000000..e6d77529 --- /dev/null +++ b/test/test-binding-connection-loginTimeout.js @@ -0,0 +1,31 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.ODBC() + , assert = require("assert") + , exitCode = 0 + ; + +db.createConnection(function (err, conn) { + //loginTimeout should be 5 by default as set in C++ + assert.equal(conn.loginTimeout, 5); + + //test the setter and getter + conn.loginTimeout = 1234; + assert.equal(conn.loginTimeout, 1234); + + //set the time out to something small + conn.loginTimeout = 1; + assert.equal(conn.loginTimeout, 1); + + conn.open(common.connectionString, function (err) { + //TODO: it would be nice if we could somehow + //force a timeout to occurr, but most testing is + //done 
locally and it's hard to get a local server + //to not accept a connection within one second... + + console.log(err); + conn.close(function () { + + }); + }); +}); diff --git a/test/test-binding-connection-timeOut.js b/test/test-binding-connection-timeOut.js new file mode 100644 index 00000000..dc30d0de --- /dev/null +++ b/test/test-binding-connection-timeOut.js @@ -0,0 +1,31 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.ODBC() + , assert = require("assert") + , exitCode = 0 + ; + +db.createConnection(function (err, conn) { + //connectionTimeout should be 0 by default as set in C++ + assert.equal(conn.connectTimeout, 0); + + //test the setter and getter + conn.connectTimeout = 1234; + assert.equal(conn.connectTimeout, 1234); + + //set the time out to something small + conn.connectTimeout = 1; + assert.equal(conn.connectTimeout, 1); + + conn.open(common.connectionString, function (err) { + //TODO: it would be nice if we could somehow + //force a timeout to occurr, but most testing is + //done locally and it's hard to get a local server + //to not accept a connection within one second... + + console.log(err); + conn.close(function () { + + }); + }); +}); diff --git a/test/test-binding-statement-executeSync.js b/test/test-binding-statement-executeSync.js new file mode 100644 index 00000000..8c5dba00 --- /dev/null +++ b/test/test-binding-statement-executeSync.js @@ -0,0 +1,81 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.ODBC() + , assert = require("assert") + , exitCode = 0 + ; + +db.createConnection(function (err, conn) { + conn.openSync(common.connectionString); + + conn.createStatement(function (err, stmt) { + var r, result, caughtError; + + //try excuting without preparing or binding. + try { + result = stmt.executeSync(); + } + catch (e) { + caughtError = e; + } + + try { + assert.ok(caughtError); + } + catch (e) { + console.log(e.message); + exitCode = 1; + } + + //try incorrectly binding a string and then executeSync + try { + r = stmt.bind("select 1 + 1 as col1"); + } + catch (e) { + caughtError = e; + } + + try { + assert.equal(caughtError.message, "Argument 1 must be an Array"); + + r = stmt.prepareSync("select 1 + ? 
as col1"); + assert.equal(r, true, "prepareSync did not return true"); + + r = stmt.bindSync([2]); + assert.equal(r, true, "bindSync did not return true"); + + result = stmt.executeSync(); + assert.equal(result.constructor.name, "ODBCResult"); + + r = result.fetchAllSync(); + assert.deepEqual(r, [ { col1: 3 } ]); + + r = result.closeSync(); + assert.equal(r, true, "closeSync did not return true"); + + result = stmt.executeSync(); + assert.equal(result.constructor.name, "ODBCResult"); + + r = result.fetchAllSync(); + assert.deepEqual(r, [ { col1: 3 } ]); + + console.log(r); + } + catch (e) { + console.log(e.stack); + + exitCode = 1; + } + + conn.closeSync(); + + if (exitCode) { + console.log("failed"); + } + else { + console.log("success"); + } + + process.exit(exitCode); + }); +}); diff --git a/test/test-binding-statement-rebinding.js b/test/test-binding-statement-rebinding.js new file mode 100644 index 00000000..51d4df86 --- /dev/null +++ b/test/test-binding-statement-rebinding.js @@ -0,0 +1,52 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.ODBC() + , assert = require("assert") + , exitCode = 0 + ; + +db.createConnection(function (err, conn) { + conn.openSync(common.connectionString); + + conn.createStatement(function (err, stmt) { + var r, result, caughtError; + + var a = ['hello', 'world']; + + stmt.prepareSync('select ? as col1, ? as col2'); + + stmt.bindSync(a); + + result = stmt.executeSync(); + + console.log(result.fetchAllSync()); + result.closeSync(); + + a[0] = 'goodbye'; + a[1] = 'steven'; + stmt.bindSync(a); + + result = stmt.executeSync(); + + r = result.fetchAllSync(); + + try { + assert.deepEqual(r, [ { col1: 'goodbye', col2: 'steven' } ]); + } + catch (e) { + console.log(e.stack); + exitCode = 1; + } + + conn.closeSync(); + + if (exitCode) { + console.log("failed"); + } + else { + console.log("success"); + } + + process.exit(exitCode); + }); +}); diff --git a/test/test-binding-transaction-commit.js b/test/test-binding-transaction-commit.js new file mode 100644 index 00000000..029f4aff --- /dev/null +++ b/test/test-binding-transaction-commit.js @@ -0,0 +1,79 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.ODBC() + , assert = require("assert") + , exitCode = 0 + ; + +db.createConnection(function (err, conn) { + + conn.openSync(common.connectionString); + + common.createTables(conn, function (err, data) { + test1() + + function test1() { + conn.beginTransaction(function (err) { + if (err) { + console.log("Error beginning transaction."); + console.log(err); + exitCode = 1 + } + + var result = conn.querySync("insert into " + common.tableName + " (COLINT, COLDATETIME, COLTEXT) VALUES (42, null, null)" ); + + //rollback + conn.endTransaction(true, function (err) { + if (err) { + console.log("Error rolling back transaction"); + console.log(err); + exitCode = 2 + } + + result = conn.querySync("select * from " + common.tableName); + data = result.fetchAllSync(); + + assert.deepEqual(data, []); + + test2(); + }); + }); + } + + function test2 () { + //Start a new transaction + conn.beginTransaction(function (err) { + if (err) { + console.log("Error beginning transaction"); + console.log(err); + exitCode = 3 + } + + result = conn.querySync("insert into " + common.tableName + " (COLINT, COLDATETIME, COLTEXT) VALUES (42, null, null)" ); + + //commit + conn.endTransaction(false, function (err) { + if (err) { + console.log("Error committing transaction"); + console.log(err); + exitCode = 3 + } + + result = 
conn.querySync("select * from " + common.tableName); + data = result.fetchAllSync(); + + assert.deepEqual(data, [ { COLINT: 42, COLDATETIME: null, COLTEXT: null } ]); + + finish(); + }); + }); + } + + function finish() { + common.dropTables(conn, function (err) { + conn.closeSync(); + process.exit(exitCode); + }); + } + }); +}); diff --git a/test/test-binding-transaction-commitSync.js b/test/test-binding-transaction-commitSync.js new file mode 100644 index 00000000..3e004d86 --- /dev/null +++ b/test/test-binding-transaction-commitSync.js @@ -0,0 +1,53 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.ODBC() + , assert = require("assert") + , exitCode = 0 + ; + +db.createConnection(function (err, conn) { + conn.openSync(common.connectionString); + + common.createTables(conn, function (err, data) { + try { + conn.beginTransactionSync(); + + var result = conn.querySync("insert into " + common.tableName + " (COLINT, COLDATETIME, COLTEXT) VALUES (42, null, null)" ); + + conn.endTransactionSync(true); //rollback + + result = conn.querySync("select * from " + common.tableName); + + assert.deepEqual(result.fetchAllSync(), []); + } + catch (e) { + console.log("Failed when rolling back"); + console.log(e.stack); + exitCode = 1 + } + + try { + //Start a new transaction + conn.beginTransactionSync(); + + result = conn.querySync("insert into " + common.tableName + " (COLINT, COLDATETIME, COLTEXT) VALUES (42, null, null)" ); + + conn.endTransactionSync(false); //commit + + result = conn.querySync("select * from " + common.tableName); + + assert.deepEqual(result.fetchAllSync(), [ { COLINT: 42, COLDATETIME: null, COLTEXT: null } ]); + } + catch (e) { + console.log("Failed when committing"); + console.log(e.stack); + + exitCode = 2; + } + + common.dropTables(conn, function (err) { + conn.closeSync(); + process.exit(exitCode); + }); + }); +}); diff --git a/test/test-closed.js b/test/test-closed.js index 71511982..155685e1 100644 --- a/test/test-closed.js +++ b/test/test-closed.js @@ -1,21 +1,14 @@ var common = require("./common") - , odbc = require("../odbc.js") - , db = new odbc.Database(); + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert") + ; -db.query("select * from test", function (err, rs, moreResultSets) { - console.error(arguments); -}); +assert.equal(db.connected, false); -db.open(common.connectionString, function(err) -{ - console.error('db.open callback'); - - db.close(function () { - console.error('db.close callback'); - - db.query("select * from test", function (err, rs, moreResultSets) { - console.error('db.query callback'); - console.error(arguments); - }); - }); +db.query("select * from test", function (err, rs, moreResultSets) { + assert.deepEqual(err, { message: 'Connection not open.' 
}); + assert.deepEqual(rs, []); + assert.equal(moreResultSets, false); + assert.equal(db.connected, false); }); diff --git a/test/test-connection-object.js b/test/test-connection-object.js new file mode 100644 index 00000000..d780297e --- /dev/null +++ b/test/test-connection-object.js @@ -0,0 +1,13 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert") + ; + +db.open(common.connectionObject, function(err){ + assert.equal(err, null); + + db.close(function () { + assert.equal(db.connected, false); + }); +}); diff --git a/test/test-date.js b/test/test-date.js new file mode 100644 index 00000000..d0309a28 --- /dev/null +++ b/test/test-date.js @@ -0,0 +1,45 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert") + ; + +var sqlite = /sqlite/i.test(common.connectionString); + +db.open(common.connectionString, function(err) { + assert.equal(err, null); + assert.equal(db.connected, true); + + var dt = new Date(); + dt.setMilliseconds(0); // MySQL truncates them. + var ds = dt.toISOString().replace('Z',''); + var sql = "SELECT cast('" + ds + "' as datetime) as DT1"; + // XXX(bnoordhuis) sqlite3 has no distinct DATETIME or TIMESTAMP type. + // 'datetime' in this expression is a function, not a type. + if (sqlite) sql = "SELECT datetime('" + ds + "') as DT1"; + console.log(sql); + + db.query(sql, function (err, data) { + assert.equal(err, null); + assert.equal(data.length, 1); + + db.close(function () { + assert.equal(db.connected, false); + console.log(dt); + console.log(data); + + //test selected data after the connection + //is closed, in case the assertion fails + if (sqlite) { + assert.equal(data[0].DT1.constructor.name, "String", "DT1 is not an instance of a String object"); + assert.equal(data[0].DT1, ds.replace('T', ' ').replace(/\.\d+$/, '')); + } else { + assert.equal(data[0].DT1.constructor.name, "Date", "DT1 is not an instance of a Date object"); + // XXX(bnoordhuis) DT1 is in local time but we inserted + // a UTC date so we need to adjust it before comparing. 
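+        // getTimezoneOffset() is in minutes, hence the 6e4 ms multiplier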
+ dt = new Date(dt.getTime() + 6e4 * dt.getTimezoneOffset()); + assert.equal(data[0].DT1.toISOString(), dt.toISOString()); + } + }); + }); +}); diff --git a/test/test-describe-column.js b/test/test-describe-column.js index 7f6af2ce..1cbe6759 100644 --- a/test/test-describe-column.js +++ b/test/test-describe-column.js @@ -1,19 +1,34 @@ var common = require("./common") - , odbc = require("../odbc.js") - , db = new odbc.Database(); + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert") + ; -db.open(common.connectionString, function(err) -{ - db.describe({ - database : 'main', - table : 'test', - column : 'col1' - }, function (err, data) { - if (err) { - console.error(err); - process.exit(1); - } - - console.error(data); - }); +db.openSync(common.connectionString); + +console.log("connected"); + +common.dropTables(db, function (err) { + if (err) console.log(err.message); + + console.log("tables dropped"); + + common.createTables(db, function (err) { + if (err) console.log(err.message); + + console.log("tables created"); + + db.describe({ + database : common.databaseName, + table : common.tableName, + column : 'COLDATETIME' + }, function (err, data) { + if (err) console.log(err.message); + + console.log(data); + + db.closeSync(); + assert.ok(data.length, "No records returned when attempting to describe the column COLDATETIME"); + }); + }); }); diff --git a/test/test-describe-database.js b/test/test-describe-database.js index 50ba4e39..a715864f 100644 --- a/test/test-describe-database.js +++ b/test/test-describe-database.js @@ -1,17 +1,18 @@ var common = require("./common") - , odbc = require("../odbc.js") - , db = new odbc.Database(); + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert") + ; -db.open(common.connectionString, function(err) -{ - db.describe({ - database : 'main' - }, function (err, data) { - if (err) { - console.error(err); - process.exit(1); - } - - console.error(data); - }); +db.openSync(common.connectionString); + +common.dropTables(db, function () { + common.createTables(db, function () { + db.describe({ + database : common.databaseName + }, function (err, data) { + db.closeSync(); + assert.ok(data.length, "No records returned when attempting to describe the database"); + }); + }); }); diff --git a/test/test-describe-table.js b/test/test-describe-table.js index 4c88d18f..23b8495b 100644 --- a/test/test-describe-table.js +++ b/test/test-describe-table.js @@ -1,18 +1,20 @@ var common = require("./common") - , odbc = require("../odbc.js") - , db = new odbc.Database(); + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert") + ; -db.open(common.connectionString, function(err) -{ - db.describe({ - database : 'main', - table : 'test' - }, function (err, data) { - if (err) { - console.error(err); - process.exit(1); - } - - console.error(data); - }); +db.openSync(common.connectionString); + +common.dropTables(db, function () { + common.createTables(db, function () { + + db.describe({ + database : common.databaseName + , table : common.tableName + }, function (err, data) { + db.closeSync(); + assert.ok(data.length, "No records returned when attempting to describe the tabe " + common.tableName); + }); + }); }); diff --git a/test/test-domains-open.js b/test/test-domains-open.js new file mode 100644 index 00000000..042e7afc --- /dev/null +++ b/test/test-domains-open.js @@ -0,0 +1,19 @@ +var domain = require("domain"); + +var d = domain.create(); + +d.on("error", function (error) { + 
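+  //errors thrown inside d.run(), including from the async open() callback, are routed here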
console.log("Error caught!", error); +}); + +d.run(function() { + var db = require("../")(); + + console.trace(); + + db.open("wrongConnectionString", function (error) { + console.trace(); + + throw new Error(); + }); +}); diff --git a/test/test-getInfoSync.js b/test/test-getInfoSync.js new file mode 100644 index 00000000..516d4246 --- /dev/null +++ b/test/test-getInfoSync.js @@ -0,0 +1,10 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert"); + +db.openSync(common.connectionString); +console.log(common); +var userName = db.conn.getInfoSync(odbc.SQL_USER_NAME); +assert.equal(userName, common.user); + diff --git a/test/test-global-open-close.js b/test/test-global-open-close.js new file mode 100644 index 00000000..df4aea7d --- /dev/null +++ b/test/test-global-open-close.js @@ -0,0 +1,14 @@ +var common = require("./common") + , odbc = require("../") + , assert = require("assert"); + +odbc.open(common.connectionString, function (err, conn) { + if (err) { + console.log(err); + } + assert.equal(err, null); + assert.equal(conn.constructor.name, 'Database'); + + conn.close(); +}); + diff --git a/test/test-instantiate-one-and-end.js b/test/test-instantiate-one-and-end.js new file mode 100644 index 00000000..315bd95c --- /dev/null +++ b/test/test-instantiate-one-and-end.js @@ -0,0 +1,9 @@ +var odbc = require("../") + , db = new odbc.Database() + ; + +//This test should just exit. The only reason it should stay open is if a +//connection has been established. But all we have done here is instantiate +//the object. + +console.log("done"); \ No newline at end of file diff --git a/test/test-issue-54.js b/test/test-issue-54.js new file mode 100644 index 00000000..4dedbad1 --- /dev/null +++ b/test/test-issue-54.js @@ -0,0 +1,38 @@ +//NOTE: this does not assert anything that it should, please fix. + +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert") + , util = require('util') + , count = 0 + ; + +var sql = +"declare @t table (x int); \ +insert @t values (1); \ +select 'You will get this message' \ +raiserror('You will never get this error!', 16, 100); \ +raiserror('Two errors in a row! WHAT?', 16, 100); \ +select 'You will never get this message, either!' as msg; \ +" + +db.open(common.connectionString, function(err) { + console.log(err || "Connected") + + if (!err) { + db.query(sql, function (err, results, more) { + console.log("q1 result: ", err, results, more) + + if (!more) { + console.log("Running second query") + + db.query("select 1 as x", function(err, results, more) { + console.log("q2 result: ", err, results, more) + + db.close(function(err) { console.log(err || "Closed") }) + }) + } + }) + } +}); \ No newline at end of file diff --git a/test/test-issue-85.js b/test/test-issue-85.js new file mode 100644 index 00000000..3d7f499a --- /dev/null +++ b/test/test-issue-85.js @@ -0,0 +1,29 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert") + , util = require('util') + , count = 0 + ; + +var sql = (common.dialect == 'sqlite' || common.dialect =='mysql') + ? 
'select cast(-1 as signed) as test, cast(-2147483648 as signed) as test2, cast(2147483647 as signed) as test3;' + : 'select cast(-1 as int) as test, cast(-2147483648 as int) as test2, cast(2147483647 as int) as test3;' + ; + +db.open(common.connectionString, function(err) { + console.error(err || "Connected") + + if (!err) { + db.query(sql, function (err, results, more) { + console.log(results); + + assert.equal(err, null); + assert.equal(results[0].test, -1); + assert.equal(results[0].test2, -2147483648); + assert.equal(results[0].test3, 2147483647); + + db.close(function(err) { console.log(err || "Closed") }) + }) + } +}); diff --git a/test/test-issue-get-column-value-2.js b/test/test-issue-get-column-value-2.js new file mode 100644 index 00000000..6fb36aa3 --- /dev/null +++ b/test/test-issue-get-column-value-2.js @@ -0,0 +1,45 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert") + , util = require('util') + , count = 0 + ; + +var getSchema = function () { + var db = new odbc.Database(); + + console.log(util.format('Count %s, time %s', count, new Date())); + //console.log(db); + + db.open(common.connectionString, function(err) { + if (err) { + console.error("connection error: ", err.message); + db.close(function(){}); + return; + } + + db.describe({database: 'main', schema: 'RETAIL', table: common.tableName }, function (err, rows) { +// db.query("select * from " + common.tableName, function (err, rows) { + if (err) { + console.error("describe error: ", err.message); + db.close(function(){}); + return; + } + + db.close(function() { + console.log("Connection Closed"); + db = null; + count += 1; + if (count < 100) { + setImmediate(getSchema); + } + else { + process.exit(0); + } + }); + }); + }); +}; + +getSchema(); \ No newline at end of file diff --git a/test/test-memory-leaks-new-objects.js b/test/test-memory-leaks-new-objects.js new file mode 100644 index 00000000..1be00a43 --- /dev/null +++ b/test/test-memory-leaks-new-objects.js @@ -0,0 +1,33 @@ +var odbc = require("../") + , openCount = 100 + , start = process.memoryUsage().heapUsed + , x = 100 + ; + +gc(); + +start = process.memoryUsage().heapUsed; + +for (x = 0; x < openCount; x++ ) { + (function () { + var db = new odbc.Database(); + db = null; + })(); +} + +gc(); + +console.log(process.memoryUsage().heapUsed - start); + +gc(); + +for (x = 0; x < openCount; x++ ) { + (function () { + var db = new odbc.Database(); + db = null; + })(); +} + +gc(); + +console.log(process.memoryUsage().heapUsed - start); \ No newline at end of file diff --git a/test/test-multi-open-close.js b/test/test-multi-open-close.js new file mode 100644 index 00000000..9d6097e9 --- /dev/null +++ b/test/test-multi-open-close.js @@ -0,0 +1,49 @@ +var common = require("./common") + , odbc = require("../") + , openCallback = 0 + , closeCallback = 0 + , openCount = 100 + , connections = [] + ; + +for (var x = 0; x < openCount; x++ ) { + (function () { + var db = new odbc.Database(); + connections.push(db); + + db.open(common.connectionString, function(err) { + if (err) { + throw err; + process.exit(1); + } + + openCallback += 1; + + maybeClose(); + }); + })(); +} + +function maybeClose() { + + if (openCount == openCallback) { + doClose(); + } +} + + +function doClose() { + connections.forEach(function (db) { + db.close(function () { + closeCallback += 1; + + maybeFinish(); + }); + }); +} + +function maybeFinish() { + if (openCount == closeCallback) { + console.log('Done'); + } +} diff --git 
a/test/test-multi-open-query-close.js b/test/test-multi-open-query-close.js new file mode 100644 index 00000000..766e8c3e --- /dev/null +++ b/test/test-multi-open-query-close.js @@ -0,0 +1,75 @@ +var common = require("./common") +, odbc = require("../") +, openCallback = 0 +, closeCallback = 0 +, queryCallback = 0 +, openCount = 3 +, connections = [] +; + +for (var x = 0; x < openCount; x++ ) { + (function (x) { + var db = new odbc.Database(); + connections.push(db); + + db.open(common.connectionString, function(err) { + if (err) { + throw err; + process.exit(); + } + + //console.error("Open: %s %s %s", x, openCount, openCallback); + + openCallback += 1; + + maybeQuery(); + }); + })(x); +} + +function maybeQuery() { + if (openCount == openCallback) { + doQuery(); + } +} + +function doQuery() { + connections.forEach(function (db, ix) { + var seconds = connections.length - ix; + + var query = "WAITFOR DELAY '00:00:0" + seconds + "'; select " + seconds + " as result"; + + db.query(query, function (err, rows, moreResultSets) { + + //console.error("Query: %s %s %s %s", ix, openCount, queryCallback, moreResultSets, rows, err); + + queryCallback += 1; + + maybeClose(); + }); + }); +} + +function maybeClose() { + if (openCount == queryCallback) { + doClose(); + } +} + +function doClose() { + connections.forEach(function (db, ix) { + db.close(function () { + //console.log("Close: %s %s %s", ix, openCount, closeCallback); + + closeCallback += 1; + + maybeFinish(); + }); + }); +} + +function maybeFinish() { + if (openCount == closeCallback) { + console.error('done'); + } +} diff --git a/test/test-multi-openSync-closeSync.js b/test/test-multi-openSync-closeSync.js new file mode 100644 index 00000000..d948d8df --- /dev/null +++ b/test/test-multi-openSync-closeSync.js @@ -0,0 +1,30 @@ +var common = require("./common") + , odbc = require("../") + , openCallback = 0 + , closeCallback = 0 + , openCount = 100 + , connections = [] + , errorCount = 0; + ; + +for (var x = 0; x < openCount; x++ ) { + var db = new odbc.Database(); + connections.push(db); + + try { + db.openSync(common.connectionString); + } + catch (e) { + console.log(common.connectionString); + console.log(e.stack); + errorCount += 1; + break; + } +} + +connections.forEach(function (db) { + db.closeSync(); +}); + +console.log('Done'); +process.exit(errorCount); \ No newline at end of file diff --git a/test/test-open-close.js b/test/test-open-close.js new file mode 100644 index 00000000..2ee660f9 --- /dev/null +++ b/test/test-open-close.js @@ -0,0 +1,29 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert"); + +assert.equal(db.connected, false); + +db.query("select * from " + common.tableName, function (err, rs, moreResultSets) { + assert.deepEqual(err, { message: 'Connection not open.' }); + assert.deepEqual(rs, []); + assert.equal(moreResultSets, false); + assert.equal(db.connected, false); +}); + +db.open(common.connectionString, function(err) { + assert.equal(err, null); + assert.equal(db.connected, true); + + db.close(function () { + assert.equal(db.connected, false); + + db.query("select * from " + common.tableName, function (err, rs, moreResultSets) { + assert.deepEqual(err, { message: 'Connection not open.' 
}); + assert.deepEqual(rs, []); + assert.equal(moreResultSets, false); + assert.equal(db.connected, false); + }); + }); +}); diff --git a/test/test-open-connectTimeout.js b/test/test-open-connectTimeout.js new file mode 100644 index 00000000..75857aa9 --- /dev/null +++ b/test/test-open-connectTimeout.js @@ -0,0 +1,24 @@ +var common = require("./common") + , odbc = require("../") + , assert = require("assert"); + +//test setting connectTimeout via the constructor works +var db = new odbc.Database({ connectTimeout : 1 }) + +db.open(common.connectionString, function(err) { + assert.equal(db.conn.connectTimeout, 1); + + assert.equal(err, null); + assert.equal(db.connected, true); + + db.close(function () { + assert.equal(db.connected, false); + + db.query("select * from " + common.tableName, function (err, rs, moreResultSets) { + assert.deepEqual(err, { message: 'Connection not open.' }); + assert.deepEqual(rs, []); + assert.equal(moreResultSets, false); + assert.equal(db.connected, false); + }); + }); +}); diff --git a/test/test-open-dont-close.js b/test/test-open-dont-close.js new file mode 100644 index 00000000..c1967b38 --- /dev/null +++ b/test/test-open-dont-close.js @@ -0,0 +1,12 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database(); + +db.open(common.connectionString, function(err) { + console.error('db.open callback'); + console.error('node should just sit and wait'); + console.log(err); + //reference db here so it isn't garbage collected: + + console.log(db.connected); +}); diff --git a/test/test-open-loginTimeout.js b/test/test-open-loginTimeout.js new file mode 100644 index 00000000..0e036b6e --- /dev/null +++ b/test/test-open-loginTimeout.js @@ -0,0 +1,24 @@ +var common = require("./common") + , odbc = require("../") + , assert = require("assert"); + +//test setting loginTimeout via the constructor works +var db = new odbc.Database({ loginTimeout : 1 }) + +db.open(common.connectionString, function(err) { + assert.equal(db.conn.loginTimeout, 1); + + assert.equal(err, null); + assert.equal(db.connected, true); + + db.close(function () { + assert.equal(db.connected, false); + + db.query("select * from " + common.tableName, function (err, rs, moreResultSets) { + assert.deepEqual(err, { message: 'Connection not open.' }); + assert.deepEqual(rs, []); + assert.equal(moreResultSets, false); + assert.equal(db.connected, false); + }); + }); +}); diff --git a/test/test-openSync.js b/test/test-openSync.js new file mode 100644 index 00000000..88f6eeb2 --- /dev/null +++ b/test/test-openSync.js @@ -0,0 +1,31 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert"); + +assert.equal(db.connected, false); + +db.query("select * from " + common.tableName, function (err, rs, moreResultSets) { + assert.deepEqual(err, { message: 'Connection not open.' 
}); + assert.deepEqual(rs, []); + assert.equal(moreResultSets, false); + assert.equal(db.connected, false); +}); + +console.log("Attempting to connect to: %s", common.connectionString); + +try { + db.openSync(common.connectionString); +} +catch(e) { + console.log(e.stack); + assert.deepEqual(e, null); +} + +try { + db.closeSync(); +} +catch(e) { + console.log(e.stack); + assert.deepEqual(e, null); +} diff --git a/test/test-param-select-with-booleans-only.js b/test/test-param-select-with-booleans-only.js new file mode 100644 index 00000000..487322cd --- /dev/null +++ b/test/test-param-select-with-booleans-only.js @@ -0,0 +1,21 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert"); + + +db.open(common.connectionString, function (err) { + assert.equal(err, null); + + db.query("select ? as \"TRUECOL\", ? as \"FALSECOL\" " + , [true, false] + , function (err, data, more) { + db.close(function () { + assert.equal(err, null); + assert.deepEqual(data, [{ + TRUECOL: true, + FALSECOL: false + }]); + }); + }); +}); diff --git a/test/test-param-select-with-decimals-only.js b/test/test-param-select-with-decimals-only.js new file mode 100644 index 00000000..2b186db6 --- /dev/null +++ b/test/test-param-select-with-decimals-only.js @@ -0,0 +1,20 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert"); + + +db.open(common.connectionString, function (err) { + assert.equal(err, null); + + db.query("select ? as \"DECCOL1\" " + , [5.5] + , function (err, data, more) { + db.close(function () { + assert.equal(err, null); + assert.deepEqual(data, [{ + DECCOL1: 5.5 + }]); + }); + }); +}); diff --git a/test/test-param-select-with-null.js b/test/test-param-select-with-null.js new file mode 100644 index 00000000..5c26a026 --- /dev/null +++ b/test/test-param-select-with-null.js @@ -0,0 +1,21 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert"); + + +db.open(common.connectionString, function (err) { + assert.equal(err, null); + + db.query("select ? as \"NULLCOL1\" " + , [null] + , function (err, data, more) { + if (err) { console.error(err) } + db.close(function () { + assert.equal(err, null); + assert.deepEqual(data, [{ + NULLCOL1: null + }]); + }); + }); +}); diff --git a/test/test-param-select-with-nulls-mixed.js b/test/test-param-select-with-nulls-mixed.js new file mode 100644 index 00000000..4f1026e0 --- /dev/null +++ b/test/test-param-select-with-nulls-mixed.js @@ -0,0 +1,23 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert"); + + +db.open(common.connectionString, function (err) { + assert.equal(err, null); + + db.query("select ? as \"TEXTCOL1\", ? as \"TEXTCOL2\", ? 
as \"NULLCOL1\" " + , ["something", "something", null] + , function (err, data, more) { + if (err) { console.error(err) } + db.close(function () { + assert.equal(err, null); + assert.deepEqual(data, [{ + TEXTCOL1: "something", + TEXTCOL2: "something", + NULLCOL1: null + }]); + }); + }); +}); diff --git a/test/test-param-select-with-numbers-mixed.js b/test/test-param-select-with-numbers-mixed.js new file mode 100644 index 00000000..8a5cb6b5 --- /dev/null +++ b/test/test-param-select-with-numbers-mixed.js @@ -0,0 +1,22 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert"); + + +db.open(common.connectionString, function (err) { + assert.equal(err, null); + + db.query("select ? as TEXTCOL, ? as TEXTCOL2, ? as INTCOL " + , ["fish", "asdf", 1] + , function (err, data, more) { + db.close(function () { + assert.equal(err, null); + assert.deepEqual(data, [{ + TEXTCOL: 'fish', + TEXTCOL2: 'asdf', + INTCOL: 1 + }]); + }); + }); +}); diff --git a/test/test-param-select-with-numbers-only.js b/test/test-param-select-with-numbers-only.js new file mode 100644 index 00000000..ad500d55 --- /dev/null +++ b/test/test-param-select-with-numbers-only.js @@ -0,0 +1,24 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert"); + + +db.open(common.connectionString, function (err) { + assert.equal(err, null); + + db.query("select ? as INTCOL1, ? as INTCOL2, ? as INTCOL3, ? as FLOATCOL4, ? as FLOATYINT" + , [5, 3, 1, 1.23456789012345, 12345.000] + , function (err, data, more) { + db.close(function () { + assert.equal(err, null); + assert.deepEqual(data, [{ + INTCOL1: 5, + INTCOL2: 3, + INTCOL3: 1, + FLOATCOL4 : 1.23456789012345, + FLOATYINT : 12345 + }]); + }); + }); +}); diff --git a/test/test-param-select-with-strings-only.js b/test/test-param-select-with-strings-only.js new file mode 100644 index 00000000..8ce19dc2 --- /dev/null +++ b/test/test-param-select-with-strings-only.js @@ -0,0 +1,22 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert"); + + +db.open(common.connectionString, function (err) { + assert.equal(err, null); + + db.query("select ? as TEXTCOL, ? as TEXTCOL2, ? as TEXTCOL3" + , ["fish", "asdf", "1234"] + , function (err, data, more) { + db.close(function () { + assert.equal(err, null); + assert.deepEqual(data, [{ + TEXTCOL: 'fish', + TEXTCOL2: 'asdf', + TEXTCOL3: '1234' + }]); + }); + }); +}); diff --git a/test/test-param-select-with-unicode.js b/test/test-param-select-with-unicode.js new file mode 100644 index 00000000..c2e5b8d5 --- /dev/null +++ b/test/test-param-select-with-unicode.js @@ -0,0 +1,15 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert") + ; + +db.open(common.connectionString, function(err) { + db.query("select ? 
as UNICODETEXT", ['ף צ ץ ק ר ש תכ ך ל מ ם נ ן ס ע פ 電电電買买買開开開東东東車车車'], function (err, data) { + db.close(function () { + console.log(data); + assert.equal(err, null); + assert.deepEqual(data, [{ UNICODETEXT: 'ף צ ץ ק ר ש תכ ך ל מ ם נ ן ס ע פ 電电電買买買開开開東东東車车車' }]); + }); + }); +}); diff --git a/test/test-param-select.js b/test/test-param-select.js deleted file mode 100644 index 78313679..00000000 --- a/test/test-param-select.js +++ /dev/null @@ -1,23 +0,0 @@ -var common = require("./common") - , odbc = require("../odbc.js") - , db = new odbc.Database(); - - -db.open(common.connectionString, function (err) { - if (err) { - console.error(err); - return; - } - - db.query("select * from test where col1 = ? " - , ["fish"] - , function (err, data, more) { - if (err) { - console.error(err); - process.exit(1); - } - - console.error(data); - } - ); -}); diff --git a/test/test-pool-close.js b/test/test-pool-close.js new file mode 100644 index 00000000..2a36f756 --- /dev/null +++ b/test/test-pool-close.js @@ -0,0 +1,38 @@ +var common = require("./common") + , odbc = require("../") + , pool = new odbc.Pool() + , connectionString = common.connectionString + , connections = [] + , connectCount = 10; + +openConnectionsUsingPool(connections); + +function openConnectionsUsingPool(connections) { + for (var x = 0; x <= connectCount; x++) { + + (function (connectionIndex) { + console.error("Opening connection #", connectionIndex); + + pool.open(connectionString, function (err, connection) { + //console.error("Opened connection #", connectionIndex); + + if (err) { + console.error("error: ", err.message); + return false; + } + + connections.push(connection); + + if (connectionIndex == connectCount) { + closeConnections(connections); + } + }); + })(x); + } +} + +function closeConnections (connections) { + pool.close(function () { + console.error("pool closed"); + }); +} \ No newline at end of file diff --git a/test/test-pool-connect.js b/test/test-pool-connect.js index 7b7154d3..a0f0d12b 100644 --- a/test/test-pool-connect.js +++ b/test/test-pool-connect.js @@ -1,9 +1,9 @@ var common = require("./common") - , odbc = require("../odbc.js") - , db = new odbc.Database() + , odbc = require("../") + , pool = new odbc.Pool() , connectionString = common.connectionString , connections = [] - , connectCount = 500; + , connectCount = 10; openConnectionsUsingPool(connections); @@ -11,8 +11,7 @@ function openConnectionsUsingPool(connections) { for (var x = 0; x <= connectCount; x++) { (function (connectionIndex) { - //setTimeout(function () { - //console.error("Opening connection #", connectionIndex); + console.error("Opening connection #", connectionIndex); pool.open(connectionString, function (err, connection) { //console.error("Opened connection #", connectionIndex); @@ -24,34 +23,6 @@ function openConnectionsUsingPool(connections) { connections.push(connection); - if (connectionIndex == connectCount) { - //closeConnections(connections); - } - }); - - //}, x * 50); - })(x); - } -} - -function openConnectionsUsingDB(connections) { - for (var x = 0; x <= connectCount; x++) { - - (function (connectionIndex) { - //console.error("Opening connection #", connectionIndex); - var db = new Database(); - - db.open(connectionString, function (err, connection) { - //console.error("Opened connection #", connectionIndex); - - if (err) { - console.error("error: ", err.message); - return false; - } - - connections.push(db); - //connections.push(connection); - if (connectionIndex == connectCount) { closeConnections(connections); } @@ -61,10 
+32,7 @@ function openConnectionsUsingDB(connections) { } function closeConnections (connections) { - connections.forEach(function (connection, idx) { - //console.error("Closing connection #", idx); - connection.close(function () { - //console.error("Closed connection #", idx); - }); - }); -} \ No newline at end of file + pool.close(function () { + console.error("pool closed"); + }); +} diff --git a/test/test-prepare-bind-execute-closeSync.js b/test/test-prepare-bind-execute-closeSync.js new file mode 100644 index 00000000..f7c74d7c --- /dev/null +++ b/test/test-prepare-bind-execute-closeSync.js @@ -0,0 +1,62 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert") + , iterations = 1000 + ; + +db.openSync(common.connectionString); + +issueQuery3(function () { + finish(); +}); + + +function issueQuery3(done) { + var count = 0 + , time = new Date().getTime(); + + var stmt = db.prepareSync('select ? as test'); + + for (var x = 0; x < iterations; x++) { + (function (x) { + stmt.bind([x], function (err) { + if (err) { + console.log(err); + return finish(); + } + + //console.log(x); + + stmt.execute(function (err, result) { + cb(err, result, x); + }); + }); + })(x); + } + + function cb (err, result, x) { + if (err) { + console.error(err); + return finish(); + } + + var a = result.fetchAllSync(); + + assert.deepEqual(a, [{ test : x }]); + + result.closeSync(); + + if (++count == iterations) { + var elapsed = new Date().getTime() - time; + + console.log("%d queries issued in %d seconds, %d/sec : Prepare - Bind - Execute - CloseSync", count, elapsed/1000, Math.floor(count/(elapsed/1000))); + return done(); + } + } +} + +function finish() { + db.closeSync(); + console.log("connection closed"); +} diff --git a/test/test-prepare-bind-execute-error.js b/test/test-prepare-bind-execute-error.js new file mode 100644 index 00000000..e76af75b --- /dev/null +++ b/test/test-prepare-bind-execute-error.js @@ -0,0 +1,49 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert") + ; + +db.openSync(common.connectionString); +issueQuery(); + +function issueQuery() { + var count = 0 + , time = new Date().getTime() + , stmt + , result + , data + ; + + assert.doesNotThrow(function () { + stmt = db.prepareSync('select cast(? 
as datetime) as test'); + }); + + assert.throws(function () { + result = stmt.executeSync(); + }); + + assert.doesNotThrow(function () { + stmt.bindSync([0]); + }); + + assert.doesNotThrow(function () { + result = stmt.executeSync(); + }); + + assert.doesNotThrow(function () { + data = result.fetchAllSync(); + }); + + assert.ok(data); + + finish(0); +} + +function finish(exitCode) { + db.closeSync(); + + console.log("connection closed"); + + process.exit(exitCode || 0); +} diff --git a/test/test-prepare-bind-execute-long-string.js b/test/test-prepare-bind-execute-long-string.js new file mode 100644 index 00000000..ee7e65c7 --- /dev/null +++ b/test/test-prepare-bind-execute-long-string.js @@ -0,0 +1,64 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert") + ; + +db.openSync(common.connectionString); +issueQuery(100001); +issueQuery(3000); +issueQuery(4000); +issueQuery(5000); +issueQuery(8000); +finish(0); + +function issueQuery(length) { + var count = 0 + , time = new Date().getTime() + , stmt + , result + , data + , str = '' + ; + + var set = 'abcdefghijklmnopqrstuvwxyz'; + + for (var x = 0; x < length; x++) { + str += set[x % set.length]; + } + + assert.doesNotThrow(function () { + stmt = db.prepareSync('select ? as longString'); + }); + + assert.doesNotThrow(function () { + stmt.bindSync([str]); + }); + + assert.doesNotThrow(function () { + result = stmt.executeSync(); + }); + + assert.doesNotThrow(function () { + data = result.fetchAllSync(); + }); + + console.log('expected length: %s, returned length: %s', str.length, data[0].longString.length); + + for (var x = 0; x < str.length; x++) { + if (str[x] != data[0].longString[x]) { + console.log(x, str[x], data[0].longString[x]); + + assert.equal(str[x], data[0].longString[x]); + } + } + + assert.equal(data[0].longString, str); +} + +function finish(exitCode) { + db.closeSync(); + + console.log("connection closed"); + process.exit(exitCode || 0); +} diff --git a/test/test-prepare-bindSync-execute-closeSync.js b/test/test-prepare-bindSync-execute-closeSync.js new file mode 100644 index 00000000..d2c74ca3 --- /dev/null +++ b/test/test-prepare-bindSync-execute-closeSync.js @@ -0,0 +1,46 @@ +var common = require("./common") + , odbc = require("../") + , assert = require("assert") + , db = new odbc.Database() + , iterations = 100 + ; + +db.openSync(common.connectionString); + +issueQuery3(function () { + finish(); +}); + +function issueQuery3(done) { + var count = 0 + , time = new Date().getTime(); + + var stmt = db.prepareSync('select ? 
as test'); + + for (var x = 0; x < iterations; x++) { + (function (x) { + stmt.bindSync([x]); + var result = stmt.executeSync() + cb(result, x); + + })(x); + } + + function cb (result, x) { + assert.deepEqual(result.fetchAllSync(), [ { test : x } ]); + + result.closeSync(); + + if (++count == iterations) { + var elapsed = new Date().getTime() - time; + + console.log("%d queries issued in %d seconds, %d/sec : Prepare - Bind - Execute - CloseSync", count, elapsed/1000, Math.floor(count/(elapsed/1000))); + return done(); + } + } +} + +function finish() { + db.closeSync(); + console.log("connection closed"); +} diff --git a/test/test-prepare.js b/test/test-prepare.js new file mode 100644 index 00000000..5b11dd45 --- /dev/null +++ b/test/test-prepare.js @@ -0,0 +1,34 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert") + ; + +db.openSync(common.connectionString); + +assert.equal(db.connected, true); + +db.prepare("select ? as col1", function (err, stmt) { + assert.equal(err, null); + assert.equal(stmt.constructor.name, "ODBCStatement"); + + stmt.bind(["hello world"], function (err) { + assert.equal(err, null); + + stmt.execute(function (err, result) { + assert.equal(err, null); + assert.equal(result.constructor.name, "ODBCResult"); + + result.fetchAll(function (err, data) { + assert.equal(err, null); + console.log(data); + + result.closeSync(); + + db.closeSync(); + assert.deepEqual(data, [{ col1: "hello world" }]); + }); + }); + }); +}); + diff --git a/test/test-prepareSync-bad-sql.js b/test/test-prepareSync-bad-sql.js new file mode 100644 index 00000000..eaf6cf84 --- /dev/null +++ b/test/test-prepareSync-bad-sql.js @@ -0,0 +1,24 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert") + ; + +db.openSync(common.connectionString); +assert.equal(db.connected, true); + +var stmt = db.prepareSync("asdf asdf asdf asdf sadf "); +assert.equal(stmt.constructor.name, "ODBCStatement"); + +stmt.bindSync(["hello world", 1, null]); + +stmt.execute(function (err, result) { + assert.ok(err); + + stmt.executeNonQuery(function (err, count) { + assert.ok(err); + + db.close(function () {}); + }); +}); + diff --git a/test/test-prepareSync-multiple-execution.js b/test/test-prepareSync-multiple-execution.js new file mode 100644 index 00000000..144c894c --- /dev/null +++ b/test/test-prepareSync-multiple-execution.js @@ -0,0 +1,69 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert") + ; + +var count = 0; +var iterations = 10; + +db.openSync(common.connectionString); + +common.dropTables(db, function () { + common.createTables(db, function (err, data) { + if (err) { + console.log(err); + + return finish(2); + } + + var stmt = db.prepareSync("insert into " + common.tableName + " (colint, coltext) VALUES (?, ?)"); + assert.equal(stmt.constructor.name, "ODBCStatement"); + + recursive(stmt); + }); +}); + +function finish(retValue) { + console.log("finish exit value: %s", retValue); + + db.closeSync(); + process.exit(retValue || 0); +} + +function recursive (stmt) { + try { + var result = stmt.bindSync([4, 'hello world']); + assert.equal(result, true); + } + catch (e) { + console.log(e.message); + finish(3); + } + + stmt.execute(function (err, result) { + if (err) { + console.log(err.message); + + return finish(4); + } + + result.closeSync(); + count += 1; + + console.log("count %s, iterations %s", count, iterations); + 
+ if (count <= iterations) { + setTimeout(function(){ + recursive(stmt); + },100); + } + else { + console.log(db.querySync("select * from " + common.tableName)); + + common.dropTables(db, function () { + return finish(0); + }); + } + }); +} diff --git a/test/test-prepareSync.js b/test/test-prepareSync.js new file mode 100644 index 00000000..8e83d54a --- /dev/null +++ b/test/test-prepareSync.js @@ -0,0 +1,29 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert") + ; + +db.openSync(common.connectionString); +assert.equal(db.connected, true); + +var stmt = db.prepareSync("select ? as col1, ? as col2, ? as col3"); +assert.equal(stmt.constructor.name, "ODBCStatement"); + +stmt.bindSync(["hello world", 1, null]); + +stmt.execute(function (err, result) { + assert.equal(err, null); + assert.equal(result.constructor.name, "ODBCResult"); + + result.fetchAll(function (err, data) { + assert.equal(err, null); + console.log(data); + + result.closeSync(); + + db.closeSync(); + assert.deepEqual(data, [{ col1: "hello world", col2 : 1, col3 : null }]); + }); +}); + diff --git a/test/test-query-create-table-fetchSync.js b/test/test-query-create-table-fetchSync.js new file mode 100644 index 00000000..7fa2ed98 --- /dev/null +++ b/test/test-query-create-table-fetchSync.js @@ -0,0 +1,23 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert") + ; + +db.openSync(common.connectionString); + +db.queryResult("create table " + common.tableName + " (COLINT INTEGER, COLDATETIME DATETIME, COLTEXT TEXT)", function (err, result) { + console.log(arguments); + + try { + //this should throw because there was no result to be had? + var data = result.fetchAllSync(); + console.log(data); + } + catch (e) { + console.log(e.stack); + } + + db.closeSync(); +}); + diff --git a/test/test-query-create-table.js b/test/test-query-create-table.js index 39597046..bc3f7a26 100644 --- a/test/test-query-create-table.js +++ b/test/test-query-create-table.js @@ -1,15 +1,12 @@ var common = require("./common") - , odbc = require("../odbc.js") - , db = new odbc.Database(); + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert") + ; -db.open(common.connectionString, function(err) -{ - db.query("create table test (col1 varchar(50), col2 varchar(20))", function (err, data) { - if (err) { - console.error(err); - process.exit(1); - } - - console.error(data); - }); +db.openSync(common.connectionString); +common.createTables(db, function (err, data, morefollowing) { + console.log(arguments); + db.closeSync(); }); + diff --git a/test/test-query-drop-table.js b/test/test-query-drop-table.js index b2638c50..376ea1d9 100644 --- a/test/test-query-drop-table.js +++ b/test/test-query-drop-table.js @@ -1,15 +1,13 @@ var common = require("./common") - , odbc = require("../odbc.js") - , db = new odbc.Database(); + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert") + ; -db.open(common.connectionString, function(err) -{ - db.query("drop table test", function (err, data) { - if (err) { - console.error(err); - process.exit(1); - } - - console.error(data); - }); +db.openSync(common.connectionString); +common.dropTables(db, function (err, data) { + db.closeSync(); + assert.equal(err, null); + assert.deepEqual(data, []); }); + diff --git a/test/test-query-insert.js b/test/test-query-insert.js index ab10dfe3..0369d8e4 100644 --- a/test/test-query-insert.js +++ 
b/test/test-query-insert.js @@ -1,33 +1,34 @@ var common = require("./common") - , odbc = require("../odbc.js") - , db = new odbc.Database(); + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert") + , insertCount = 0; + ; -db.open(common.connectionString, function(err) -{ - db.query("insert into test (col1) values ('sandwich')", function (err, data) { - if (err) { - console.error(err); - process.exit(1); - } - - console.error(data); - }); - - db.query("insert into test (col1) values ('fish')", function (err, data) { - if (err) { - console.error(err); - process.exit(1); - } - - console.error(data); - }); - - db.query("insert into test (col1) values ('scarf')", function (err, data) { - if (err) { - console.error(err); - process.exit(1); - } - - console.error(data); - }); +db.open(common.connectionString, function(err) { + common.dropTables(db, function () { + common.createTables(db, function (err) { + assert.equal(err, null); + + db.query("insert into " + common.tableName + " (COLTEXT) values ('sandwich')", insertCallback); + db.query("insert into " + common.tableName + " (COLTEXT) values ('fish')", insertCallback); + db.query("insert into " + common.tableName + " (COLTEXT) values ('scarf')", insertCallback); + + }); + }); }); + +function insertCallback(err, data) { + assert.equal(err, null); + assert.deepEqual(data, []); + + insertCount += 1; + + if (insertCount === 3) { + common.dropTables(db, function () { + db.close(function () { + console.error("Done"); + }); + }); + } +} \ No newline at end of file diff --git a/test/test-query-select-fetch.js b/test/test-query-select-fetch.js new file mode 100644 index 00000000..0fc356f5 --- /dev/null +++ b/test/test-query-select-fetch.js @@ -0,0 +1,19 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert") + ; + +db.openSync(common.connectionString); +assert.equal(db.connected, true); + +db.queryResult("select 1 as COLINT, 'some test' as COLTEXT ", function (err, result) { + assert.equal(err, null); + assert.equal(result.constructor.name, "ODBCResult"); + + result.fetch(function (err, data) { + db.closeSync(); + assert.deepEqual(data, { COLINT: '1', COLTEXT: 'some test' }); + }); +}); + diff --git a/test/test-query-select-fetchAll.js b/test/test-query-select-fetchAll.js new file mode 100644 index 00000000..12450968 --- /dev/null +++ b/test/test-query-select-fetchAll.js @@ -0,0 +1,23 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert") + ; + +db.openSync(common.connectionString); + +assert.equal(db.connected, true); + +db.queryResult("select 1 as COLINT, 'some test' as COLTEXT union select 2, 'something else' ", function (err, result) { + assert.equal(err, null); + assert.equal(result.constructor.name, "ODBCResult"); + + result.fetchAll(function (err, data) { + db.closeSync(); + assert.deepEqual(data, [ + {"COLINT":1,"COLTEXT":"some test"} + ,{"COLINT":2,"COLTEXT":"something else"} + ]); + }); +}); + diff --git a/test/test-query-select-fetchAllSync.js b/test/test-query-select-fetchAllSync.js new file mode 100644 index 00000000..68eaa57e --- /dev/null +++ b/test/test-query-select-fetchAllSync.js @@ -0,0 +1,23 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert") + ; + +db.openSync(common.connectionString); + +assert.equal(db.connected, true); + +db.queryResult("select 1 as COLINT, 'some test' as COLTEXT union select 2, 
'something else' ", function (err, result) { + assert.equal(err, null); + assert.equal(result.constructor.name, "ODBCResult"); + + var data = result.fetchAllSync(); + + db.closeSync(); + assert.deepEqual(data, [ + {"COLINT":1,"COLTEXT":"some test"} + ,{"COLINT":2,"COLTEXT":"something else"} + ]); +}); + diff --git a/test/test-query-select-fetchMode-array.js b/test/test-query-select-fetchMode-array.js new file mode 100644 index 00000000..33cb72b3 --- /dev/null +++ b/test/test-query-select-fetchMode-array.js @@ -0,0 +1,17 @@ +var common = require("./common") + , odbc = require("../") + , db = odbc({ fetchMode : odbc.FETCH_ARRAY }) + , assert = require("assert") + ; + +db.openSync(common.connectionString); + +assert.equal(db.connected, true); + +db.query("select 1 as COLINT, 'some test' as COLTEXT ", function (err, data) { + assert.equal(err, null); + + db.closeSync(); + assert.deepEqual(data, [[1,"some test"]]); +}); + diff --git a/test/test-query-select-unicode.js b/test/test-query-select-unicode.js new file mode 100644 index 00000000..751b4644 --- /dev/null +++ b/test/test-query-select-unicode.js @@ -0,0 +1,15 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert") + ; + +db.openSync(common.connectionString); + +db.query("select '☯ąčęėįšųūž☎áäàéêèóöòüßÄÖÜ€ шчябы Ⅲ ❤' as UNICODETEXT", function (err, data) { + db.closeSync(); + console.log(data); + assert.equal(err, null); + assert.deepEqual(data, [{ UNICODETEXT: '☯ąčęėįšųūž☎áäàéêèóöòüßÄÖÜ€ шчябы Ⅲ ❤' }]); +}); + diff --git a/test/test-query-select.js b/test/test-query-select.js index 8bd1dfbd..39916d1b 100644 --- a/test/test-query-select.js +++ b/test/test-query-select.js @@ -1,15 +1,14 @@ var common = require("./common") - , odbc = require("../odbc.js") - , db = new odbc.Database(); + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert") + ; -db.open(common.connectionString, function(err) -{ - db.query('select * from test', function (err, data) { - if (err) { - console.error(err); - process.exit(1); - } - - console.error(data); - }); +db.openSync(common.connectionString); + +db.query("select 1 as \"COLINT\", 'some test' as \"COLTEXT\"", function (err, data) { + db.closeSync(); + assert.equal(err, null); + assert.deepEqual(data, [{ COLINT: '1', COLTEXT: 'some test' }]); }); + diff --git a/test/test-queryResultSync-getColumnNamesSync.js b/test/test-queryResultSync-getColumnNamesSync.js new file mode 100644 index 00000000..67d95d95 --- /dev/null +++ b/test/test-queryResultSync-getColumnNamesSync.js @@ -0,0 +1,14 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert") + ; + +db.openSync(common.connectionString); +assert.equal(db.connected, true); + +var rs = db.queryResultSync("select 1 as SomeIntField, 'string' as someStringField"); + +assert.deepEqual(rs.getColumnNamesSync(), ['SomeIntField', 'someStringField']); + +db.closeSync(); diff --git a/test/test-queryResultSync-getRowCount.js b/test/test-queryResultSync-getRowCount.js new file mode 100644 index 00000000..b6fa2d64 --- /dev/null +++ b/test/test-queryResultSync-getRowCount.js @@ -0,0 +1,34 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert") + ; + +db.openSync(common.connectionString); +assert.equal(db.connected, true); + +common.dropTables(db, function () { + common.createTables(db, function (err, data) { + if (err) { + console.log(err); + + return 
finish(2); + } + + var rs = db.queryResultSync("insert into " + common.tableName + " (colint, coltext) VALUES (100, 'hello world')"); + assert.equal(rs.constructor.name, "ODBCResult"); + + assert.equal(rs.getRowCountSync(), 1); + + common.dropTables(db, function () { + return finish(0); + }); + }); +}); + +function finish(retValue) { + console.log("finish exit value: %s", retValue); + + db.closeSync(); + process.exit(retValue || 0); +} diff --git a/test/test-querySync-select-unicode.js b/test/test-querySync-select-unicode.js new file mode 100644 index 00000000..21c6b663 --- /dev/null +++ b/test/test-querySync-select-unicode.js @@ -0,0 +1,20 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert") + ; + +db.openSync(common.connectionString); +var data; + +try { + data = db.querySync("select 'ꜨꜢ' as UNICODETEXT"); +} +catch (e) { + console.log(e.stack); +} + +db.closeSync(); +console.log(data); +assert.deepEqual(data, [{ UNICODETEXT: 'ꜨꜢ' }]); + diff --git a/test/test-querySync-select-with-execption.js b/test/test-querySync-select-with-execption.js new file mode 100644 index 00000000..b53cbe42 --- /dev/null +++ b/test/test-querySync-select-with-execption.js @@ -0,0 +1,22 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert") + ; + +db.openSync(common.connectionString); +assert.equal(db.connected, true); +var err = null; + +try { + var data = db.querySync("select invalid query"); +} +catch (e) { + console.log(e.stack); + + err = e; +} + +db.closeSync(); +assert.equal(err.error, "[node-odbc] Error in ODBCConnection::QuerySync"); + diff --git a/test/test-querySync-select.js b/test/test-querySync-select.js new file mode 100644 index 00000000..7f14fa3c --- /dev/null +++ b/test/test-querySync-select.js @@ -0,0 +1,15 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert") + ; + +db.openSync(common.connectionString); +assert.equal(db.connected, true); + +var data = db.querySync("select 1 as \"COLINT\", 'some test' as \"COLTEXT\""); + +db.closeSync(); +assert.deepEqual(data, [{ COLINT: 1, COLTEXT: 'some test' }]); + + diff --git a/test/test-require-and-end.js b/test/test-require-and-end.js new file mode 100644 index 00000000..37153945 --- /dev/null +++ b/test/test-require-and-end.js @@ -0,0 +1,8 @@ +var odbc = require("../") + ; + +//This test should just exit. 
This tests an issue where +//the C++ ODBC::Init function was causing the event loop to +//stay alive + +console.log("done"); \ No newline at end of file diff --git a/test/test-transaction-commit-sync.js b/test/test-transaction-commit-sync.js new file mode 100644 index 00000000..1880956b --- /dev/null +++ b/test/test-transaction-commit-sync.js @@ -0,0 +1,54 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert") + , exitCode = 0 + ; + + +db.openSync(common.connectionString); + +common.createTables(db, function (err, data) { + try { + db.beginTransactionSync(); + + var results = db.querySync("insert into " + common.tableName + " (COLINT, COLDATETIME, COLTEXT) VALUES (42, null, null)" ); + + db.rollbackTransactionSync(); + + results = db.querySync("select * from " + common.tableName); + + assert.deepEqual(results, []); + } + catch (e) { + console.log("Failed when rolling back"); + console.log(e.stack); + exitCode = 1 + } + + try { + //Start a new transaction + db.beginTransactionSync(); + + result = db.querySync("insert into " + common.tableName + " (COLINT, COLDATETIME, COLTEXT) VALUES (42, null, null)" ); + + db.commitTransactionSync(); //commit + + result = db.querySync("select * from " + common.tableName); + + assert.deepEqual(result, [ { COLINT: 42, COLDATETIME: null, COLTEXT: null } ]); + } + catch (e) { + console.log("Failed when committing"); + console.log(e.stack); + + exitCode = 2; + } + + common.dropTables(db, function (err) { + db.closeSync(); + process.exit(exitCode); + }); +}); + + diff --git a/test/test-transaction-commit.js b/test/test-transaction-commit.js new file mode 100644 index 00000000..cc4f425d --- /dev/null +++ b/test/test-transaction-commit.js @@ -0,0 +1,77 @@ +var common = require("./common") + , odbc = require("../") + , db = new odbc.Database() + , assert = require("assert") + , exitCode = 0 + ; + + +db.openSync(common.connectionString); + +common.createTables(db, function (err, data) { + test1() + + function test1() { + db.beginTransaction(function (err) { + if (err) { + console.log("Error beginning transaction."); + console.log(err); + exitCode = 1 + } + + var result = db.querySync("insert into " + common.tableName + " (COLINT, COLDATETIME, COLTEXT) VALUES (42, null, null)" ); + + //rollback + db.endTransaction(true, function (err) { + if (err) { + console.log("Error rolling back transaction"); + console.log(err); + exitCode = 2 + } + + data = db.querySync("select * from " + common.tableName); + + assert.deepEqual(data, []); + + test2(); + }); + }); + } + + function test2 () { + //Start a new transaction + db.beginTransaction(function (err) { + if (err) { + console.log("Error beginning transaction"); + console.log(err); + exitCode = 3 + } + + result = db.querySync("insert into " + common.tableName + " (COLINT, COLDATETIME, COLTEXT) VALUES (42, null, null)" ); + + //commit + db.endTransaction(false, function (err) { + if (err) { + console.log("Error committing transaction"); + console.log(err); + exitCode = 3 + } + + data = db.querySync("select * from " + common.tableName); + + assert.deepEqual(data, [ { COLINT: 42, COLDATETIME: null, COLTEXT: null } ]); + + finish(); + }); + }); + } + + function finish() { + common.dropTables(db, function (err) { + db.closeSync(); + process.exit(exitCode); + }); + } +}); + + diff --git a/wscript b/wscript deleted file mode 100644 index 78ead27f..00000000 --- a/wscript +++ /dev/null @@ -1,16 +0,0 @@ -def set_options(opt): - opt.tool_options("compiler_cxx") - -def 
configure(conf): - conf.check_tool("compiler_cxx") - conf.check_tool("node_addon") - if not conf.check(lib="odbc", libpath=['/usr/local/lib', '/opt/local/lib'], uselib_store="ODBC"): - conf.fatal('Missing libodbc'); - conf.env.append_value('LIBPATH_ODBC', '/opt/local/lib'); - -def build(bld): - obj = bld.new_task_gen("cxx", "shlib", "node_addon") - obj.cxxflags = ["-g", "-D_FILE_OFFSET_BITS=64", "-D_LARGEFILE_SOURCE", "-Wall"] - obj.target = "odbc_bindings" - obj.source = "src/Database.cpp" - obj.uselib = "ODBC" \ No newline at end of file