DBD-ODBC-1.61/0000755000175000017500000000000013614770375012122 5ustar martinmartinDBD-ODBC-1.61/README.hpux0000644000175000017500000000167512254016141013755 0ustar martinmartinHopefully this will help someone compiling for HP/UX. I pulled this off dbi-users from Jonathon Leffler. If someone using HP/UX can refine this for me, I'd be grateful. Regards, Jeff > Here is the error that I am getting after running the make test: > > ====================================================================== > ====== > ============= > t/t00basic........../usr/lib/dld.sl: Can't shl_load() a library containing > Thread Local Storage: /usr/lib/libcl.2 Hi, If you look in Notes/environment.variables file in the DBD::Informix distribution, you will see a variable DBD_INFORMIX_HPUX_USELIBCL. The documentation refers to libcl.1; you're running into the equivalent problem with libcl.2. I suggest that you track down where that is used (Makefile.PL), and change the reference to '-l:libcl.1' to '-l:libcl.\d' (should be good for a few years, anyway). As far as I can tell, the libcl library provides nothing of value to ClientSDK. DBD-ODBC-1.61/ConvertUTF.c0000644000175000017500000005062112250310263014246 0ustar martinmartin#ifdef WITH_UNICODE /* * Copyright 2001-2004 Unicode, Inc. * * Disclaimer * * This source code is provided as is by Unicode, Inc. No claims are * made as to fitness for any particular purpose. No warranties of any * kind are expressed or implied. The recipient agrees to determine * applicability of information provided. If this file has been * purchased on magnetic or optical media from Unicode, Inc., the * sole remedy for any claim will be exchange of defective media * within 90 days of receipt. * * Limitations on Rights to Redistribute This Code * * Unicode, Inc. hereby grants the right to freely use the information * supplied in this file in the creation of products supporting the * Unicode Standard, and to make copies of this file in any form * for internal or external distribution as long as this notice * remains attached. */ /* * NOTE: The original version of this code can be found at * http://www.unicode.org/Public/PROGRAMS/CVTUTF/ * This version was slightly modified to allow ConvertUTF8toUTF16 and * ConvertUTF16toUTF8 to calculate the bytes required without writing to * the target buffer. * */ /* --------------------------------------------------------------------- Conversions between UTF32, UTF-16, and UTF-8. Source code file. Author: Mark E. Davis, 1994. Rev History: Rick McGowan, fixes & updates May 2001. Sept 2001: fixed const & error conditions per mods suggested by S. Parent & A. Lillich. June 2002: Tim Dodd added detection and handling of incomplete source sequences, enhanced error detection, added casts to eliminate compiler warnings. July 2003: slight mods to back out aggressive FFFE detection. Jan 2004: updated switches in from-UTF8 conversions. Oct 2004: updated to use UNI_MAX_LEGAL_UTF32 in UTF-32 conversions. See the header file "ConvertUTF.h" for complete documentation. 
------------------------------------------------------------------------ */ #include "ConvertUTF.h" #ifdef CVTUTF_DEBUG #include #endif static const int halfShift = 10; /* used for shifting by 10 bits */ static const UTF32 halfBase = 0x0010000UL; static const UTF32 halfMask = 0x3FFUL; #define UNI_SUR_HIGH_START (UTF32)0xD800 #define UNI_SUR_HIGH_END (UTF32)0xDBFF #define UNI_SUR_LOW_START (UTF32)0xDC00 #define UNI_SUR_LOW_END (UTF32)0xDFFF #define false 0 #define true 1 /* --------------------------------------------------------------------- */ ConversionResult ConvertUTF32toUTF16 ( const UTF32** sourceStart, const UTF32* sourceEnd, UTF16** targetStart, UTF16* targetEnd, ConversionFlags flags) { ConversionResult result = conversionOK; const UTF32* source = *sourceStart; UTF16* target = *targetStart; while (source < sourceEnd) { UTF32 ch; if (target >= targetEnd) { result = targetExhausted; break; } ch = *source++; if (ch <= UNI_MAX_BMP) { /* Target is a character <= 0xFFFF */ /* UTF-16 surrogate values are illegal in UTF-32; 0xffff or 0xfffe are both reserved values */ if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) { if (flags == strictConversion) { --source; /* return to the illegal value itself */ result = sourceIllegal; break; } else { *target++ = UNI_REPLACEMENT_CHAR; } } else { *target++ = (UTF16)ch; /* normal case */ } } else if (ch > UNI_MAX_LEGAL_UTF32) { if (flags == strictConversion) { result = sourceIllegal; } else { *target++ = UNI_REPLACEMENT_CHAR; } } else { /* target is a character in range 0xFFFF - 0x10FFFF. */ if (target + 1 >= targetEnd) { --source; /* Back up source pointer! */ result = targetExhausted; break; } ch -= halfBase; *target++ = (UTF16)((ch >> halfShift) + UNI_SUR_HIGH_START); *target++ = (UTF16)((ch & halfMask) + UNI_SUR_LOW_START); } } *sourceStart = source; *targetStart = target; return result; } /* --------------------------------------------------------------------- */ ConversionResult ConvertUTF16toUTF32 ( const UTF16** sourceStart, const UTF16* sourceEnd, UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags) { ConversionResult result = conversionOK; const UTF16* source = *sourceStart; UTF32* target = *targetStart; UTF32 ch, ch2; while (source < sourceEnd) { const UTF16* oldSource = source; /* In case we have to back up because of target overflow. */ ch = *source++; /* If we have a surrogate pair, convert to UTF32 first. */ if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) { /* If the 16 bits following the high surrogate are in the source buffer... */ if (source < sourceEnd) { ch2 = *source; /* If it's a low surrogate, convert to UTF32. */ if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) { ch = ((ch - UNI_SUR_HIGH_START) << halfShift) + (ch2 - UNI_SUR_LOW_START) + halfBase; ++source; } else if (flags == strictConversion) { /* it's an unpaired high surrogate */ --source; /* return to the illegal value itself */ result = sourceIllegal; break; } } else { /* We don't have the 16 bits following the high surrogate. */ --source; /* return to the high surrogate */ result = sourceExhausted; break; } } else if (flags == strictConversion) { /* UTF-16 surrogate values are illegal in UTF-32 */ if (ch >= UNI_SUR_LOW_START && ch <= UNI_SUR_LOW_END) { --source; /* return to the illegal value itself */ result = sourceIllegal; break; } } if (target >= targetEnd) { source = oldSource; /* Back up source pointer! 
*/ result = targetExhausted; break; } *target++ = ch; } *sourceStart = source; *targetStart = target; #ifdef CVTUTF_DEBUG if (result == sourceIllegal) { fprintf(stderr, "ConvertUTF16toUTF32 illegal seq 0x%04x,%04x\n", ch, ch2); fflush(stderr); } #endif return result; } /* --------------------------------------------------------------------- */ /* * Index into the table below with the first byte of a UTF-8 sequence to * get the number of trailing bytes that are supposed to follow it. * Note that *legal* UTF-8 values can't have 4 or 5-bytes. The table is * left as-is for anyone who may want to do such conversion, which was * allowed in earlier algorithms. */ static const char trailingBytesForUTF8[256] = { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 3,3,3,3,3,3,3,3,4,4,4,4,5,5,5,5 }; /* * Magic values subtracted from a buffer value during UTF8 conversion. * This table contains as many values as there might be trailing bytes * in a UTF-8 sequence. */ static const UTF32 offsetsFromUTF8[6] = { 0x00000000UL, 0x00003080UL, 0x000E2080UL, 0x03C82080UL, 0xFA082080UL, 0x82082080UL }; /* * Once the bits are split out into bytes of UTF-8, this is a mask OR-ed * into the first byte, depending on how many bytes follow. There are * as many entries in this table as there are UTF-8 sequence types. * (I.e., one byte sequence, two byte... etc.). Remember that sequencs * for *legal* UTF-8 will be 4 or fewer bytes total. */ static const UTF8 firstByteMark[7] = { 0x00, 0x00, 0xC0, 0xE0, 0xF0, 0xF8, 0xFC }; /* --------------------------------------------------------------------- */ /* The interface converts a whole buffer to avoid function-call overhead. * Constants have been gathered. Loops & conditionals have been removed as * much as possible for efficiency, in favor of drop-through switches. * (See "Note A" at the bottom of the file for equivalent code.) * If your compiler supports it, the "isLegalUTF8" call can be turned * into an inline function. */ /* --------------------------------------------------------------------- */ ConversionResult ConvertUTF16toUTF8 ( const UTF16** sourceStart, const UTF16* sourceEnd, UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags, unsigned int *bytes_written) { ConversionResult result = conversionOK; const UTF16* source = *sourceStart; UTF8* target; target = targetStart ? *targetStart : 0; *bytes_written = 0; while (source < sourceEnd) { UTF32 ch; unsigned short bytesToWrite = 0; const UTF32 byteMask = 0xBF; const UTF32 byteMark = 0x80; const UTF16* oldSource = source; /* In case we have to back up because of target overflow. */ ch = *source++; /* If we have a surrogate pair, convert to UTF32 first. */ if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) { /* If the 16 bits following the high surrogate are in the source buffer... */ if (source < sourceEnd) { UTF32 ch2 = *source; /* If it's a low surrogate, convert to UTF32. 
*/ if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) { ch = ((ch - UNI_SUR_HIGH_START) << halfShift) + (ch2 - UNI_SUR_LOW_START) + halfBase; ++source; } else if (flags == strictConversion) { /* it's an unpaired high surrogate */ --source; /* return to the illegal value itself */ result = sourceIllegal; break; } } else { /* We don't have the 16 bits following the high surrogate. */ --source; /* return to the high surrogate */ result = sourceExhausted; break; } } else if (flags == strictConversion) { /* UTF-16 surrogate values are illegal in UTF-32 */ if (ch >= UNI_SUR_LOW_START && ch <= UNI_SUR_LOW_END) { --source; /* return to the illegal value itself */ result = sourceIllegal; break; } } /* Figure out how many bytes the result will require */ if (ch < (UTF32)0x80) { bytesToWrite = 1; } else if (ch < (UTF32)0x800) { bytesToWrite = 2; } else if (ch < (UTF32)0x10000) { bytesToWrite = 3; } else if (ch < (UTF32)0x110000) { bytesToWrite = 4; } else { bytesToWrite = 3; ch = UNI_REPLACEMENT_CHAR; } target += bytesToWrite; if (targetStart) { if (target > targetEnd) { source = oldSource; /* Back up source pointer! */ target -= bytesToWrite; result = targetExhausted; break; } switch (bytesToWrite) { /* note: everything falls through. */ case 4: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6; case 3: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6; case 2: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6; case 1: *--target = (UTF8)(ch | firstByteMark[bytesToWrite]); } } *bytes_written = (*bytes_written) + bytesToWrite; target += bytesToWrite; } *sourceStart = source; if (targetStart) *targetStart = target; return result; } /* --------------------------------------------------------------------- */ /* * Utility routine to tell whether a sequence of bytes is legal UTF-8. * This must be called with the length pre-determined by the first byte. * If not calling this from ConvertUTF8to*, then the length can be set by: * length = trailingBytesForUTF8[*source]+1; * and the sequence is illegal right away if there aren't that many bytes * available. * If presented with a length > 4, this returns false. The Unicode * definition of UTF-8 goes up to 4-byte sequences. */ static Boolean isLegalUTF8(const UTF8 *source, int length) { UTF8 a; const UTF8 *srcptr = source+length; switch (length) { default: return false; /* Everything else falls through when "true"... */ case 4: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false; case 3: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false; case 2: if ((a = (*--srcptr)) > 0xBF) return false; switch (*source) { /* no fall-through in this inner switch */ case 0xE0: if (a < 0xA0) return false; break; case 0xED: if (a > 0x9F) return false; break; case 0xF0: if (a < 0x90) return false; break; case 0xF4: if (a > 0x8F) return false; break; default: if (a < 0x80) return false; } case 1: if (*source >= 0x80 && *source < 0xC2) return false; } if (*source > 0xF4) return false; return true; } /* --------------------------------------------------------------------- */ /* * Exported function to return whether a UTF-8 sequence is legal or not. * This is not used here; it's just exported. 
*/ Boolean isLegalUTF8Sequence(const UTF8 *source, const UTF8 *sourceEnd) { int length = trailingBytesForUTF8[*source]+1; if (source+length > sourceEnd) { return false; } return isLegalUTF8(source, length); } /* --------------------------------------------------------------------- */ ConversionResult ConvertUTF8toUTF16 ( const UTF8** sourceStart, const UTF8* sourceEnd, UTF16** targetStart, UTF16* targetEnd, ConversionFlags flags, unsigned int *bytes_written) { ConversionResult result = conversionOK; const UTF8* source = *sourceStart; UTF16* target = targetStart ? *targetStart : 0; *bytes_written = 0; while (source < sourceEnd) { UTF32 ch = 0; unsigned short extraBytesToRead = trailingBytesForUTF8[*source]; if (source + extraBytesToRead >= sourceEnd) { result = sourceExhausted; break; } /* Do this check whether lenient or strict */ if (! isLegalUTF8(source, extraBytesToRead+1)) { result = sourceIllegal; break; } /* * The cases all fall through. See "Note A" below. */ switch (extraBytesToRead) { case 5: ch += *source++; ch <<= 6; /* remember, illegal UTF-8 */ case 4: ch += *source++; ch <<= 6; /* remember, illegal UTF-8 */ case 3: ch += *source++; ch <<= 6; case 2: ch += *source++; ch <<= 6; case 1: ch += *source++; ch <<= 6; case 0: ch += *source++; } ch -= offsetsFromUTF8[extraBytesToRead]; if (targetStart) { if (target >= targetEnd) { source -= (extraBytesToRead+1); /* Back up source pointer! */ result = targetExhausted; break; } } if (ch <= UNI_MAX_BMP) { /* Target is a character <= 0xFFFF */ /* UTF-16 surrogate values are illegal in UTF-32 */ if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) { if (flags == strictConversion) { source -= (extraBytesToRead+1); /* return to the illegal value itself */ result = sourceIllegal; break; } else { if (targetStart) *target = UNI_REPLACEMENT_CHAR; target++; *bytes_written = (*bytes_written) + sizeof(UTF16); } } else { if (targetStart) *target = (UTF16)ch; /* normal case */ target++; *bytes_written = (*bytes_written) + sizeof(UTF16); } } else if (ch > UNI_MAX_UTF16) { if (flags == strictConversion) { result = sourceIllegal; source -= (extraBytesToRead+1); /* return to the start */ break; /* Bail out; shouldn't continue */ } else { if (targetStart) *target = UNI_REPLACEMENT_CHAR; target++; *bytes_written = (*bytes_written) + sizeof(UTF16); } } else { /* target is a character in range 0xFFFF - 0x10FFFF. */ if (targetStart) { if (target + 1 >= targetEnd) { source -= (extraBytesToRead+1); /* Back up source pointer! 
*/ result = targetExhausted; break; } } ch -= halfBase; if (targetStart) { *target++ = (UTF16)((ch >> halfShift) + UNI_SUR_HIGH_START); *target++ = (UTF16)((ch & halfMask) + UNI_SUR_LOW_START); } else { target += 2; *bytes_written = (*bytes_written) + 2 * sizeof(UTF16); } } } *sourceStart = source; if (targetStart) *targetStart = target; return result; } /* --------------------------------------------------------------------- */ ConversionResult ConvertUTF32toUTF8 ( const UTF32** sourceStart, const UTF32* sourceEnd, UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags) { ConversionResult result = conversionOK; const UTF32* source = *sourceStart; UTF8* target = *targetStart; while (source < sourceEnd) { UTF32 ch; unsigned short bytesToWrite = 0; const UTF32 byteMask = 0xBF; const UTF32 byteMark = 0x80; ch = *source++; if (flags == strictConversion ) { /* UTF-16 surrogate values are illegal in UTF-32 */ if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) { --source; /* return to the illegal value itself */ result = sourceIllegal; break; } } /* * Figure out how many bytes the result will require. Turn any * illegally large UTF32 things (> Plane 17) into replacement chars. */ if (ch < (UTF32)0x80) { bytesToWrite = 1; } else if (ch < (UTF32)0x800) { bytesToWrite = 2; } else if (ch < (UTF32)0x10000) { bytesToWrite = 3; } else if (ch <= UNI_MAX_LEGAL_UTF32) { bytesToWrite = 4; } else { bytesToWrite = 3; ch = UNI_REPLACEMENT_CHAR; result = sourceIllegal; } target += bytesToWrite; if (target > targetEnd) { --source; /* Back up source pointer! */ target -= bytesToWrite; result = targetExhausted; break; } switch (bytesToWrite) { /* note: everything falls through. */ case 4: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6; case 3: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6; case 2: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6; case 1: *--target = (UTF8) (ch | firstByteMark[bytesToWrite]); } target += bytesToWrite; } *sourceStart = source; *targetStart = target; return result; } /* --------------------------------------------------------------------- */ ConversionResult ConvertUTF8toUTF32 ( const UTF8** sourceStart, const UTF8* sourceEnd, UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags) { ConversionResult result = conversionOK; const UTF8* source = *sourceStart; UTF32* target = *targetStart; while (source < sourceEnd) { UTF32 ch = 0; unsigned short extraBytesToRead = trailingBytesForUTF8[*source]; if (source + extraBytesToRead >= sourceEnd) { result = sourceExhausted; break; } /* Do this check whether lenient or strict */ if (! isLegalUTF8(source, extraBytesToRead+1)) { result = sourceIllegal; break; } /* * The cases all fall through. See "Note A" below. */ switch (extraBytesToRead) { case 5: ch += *source++; ch <<= 6; case 4: ch += *source++; ch <<= 6; case 3: ch += *source++; ch <<= 6; case 2: ch += *source++; ch <<= 6; case 1: ch += *source++; ch <<= 6; case 0: ch += *source++; } ch -= offsetsFromUTF8[extraBytesToRead]; if (target >= targetEnd) { source -= (extraBytesToRead+1); /* Back up the source pointer! */ result = targetExhausted; break; } if (ch <= UNI_MAX_LEGAL_UTF32) { /* * UTF-16 surrogate values are illegal in UTF-32, and anything * over Plane 17 (> 0x10FFFF) is illegal. 
*/ if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) { if (flags == strictConversion) { source -= (extraBytesToRead+1); /* return to the illegal value itself */ result = sourceIllegal; break; } else { *target++ = UNI_REPLACEMENT_CHAR; } } else { *target++ = ch; } } else { /* i.e., ch > UNI_MAX_LEGAL_UTF32 */ result = sourceIllegal; *target++ = UNI_REPLACEMENT_CHAR; } } *sourceStart = source; *targetStart = target; return result; } /* --------------------------------------------------------------------- Note A. The fall-through switches in UTF-8 reading code save a temp variable, some decrements & conditionals. The switches are equivalent to the following loop: { int tmpBytesToRead = extraBytesToRead+1; do { ch += *source++; --tmpBytesToRead; if (tmpBytesToRead) ch <<= 6; } while (tmpBytesToRead > 0); } In UTF-8 writing code, the switches on "bytesToWrite" are similarly unrolled loops. --------------------------------------------------------------------- */ #endif /* WITH_UNICODE */ DBD-ODBC-1.61/unicode_helper.c0000644000175000017500000002675013614770327015262 0ustar martinmartin#ifdef WITH_UNICODE #include "ODBC.h" #include #include "ConvertUTF.h" typedef enum { do_new=1, do_cat, do_set } new_cat_set_t; /* static prototypes */ static long utf16_len(UTF16 *wp); static void utf16_copy(UTF16 *d, UTF16 *s); static SV * _dosvwv(pTHX_ SV * sv, UTF16 * wp, STRLEN len, new_cat_set_t mode); /* * If len>=0, wp is an array of wide characters without a * termination character. * If len==-1, wp is a null-terminated wide string */ static SV * _dosvwv(pTHX_ SV * sv, UTF16 * wp, STRLEN len, new_cat_set_t mode) { char * p=NULL; STRLEN svlen; #ifdef WIN32 int bytes; bytes=WideCharToMultiByte(CP_UTF8,0,wp,len,NULL,0,NULL,NULL); Newz(0,p,1+bytes,char); /* allocate bytes+1 chars - ptr to p */ if (bytes!=0) { if(!WideCharToMultiByte(CP_UTF8,0,wp,len,p,bytes,NULL,NULL)) { int err=GetLastError(); switch (err) { case ERROR_INSUFFICIENT_BUFFER: croak("_dosvwv: WideCharToMultiByte() failed: insufficient buffer"); case ERROR_INVALID_FLAGS: croak("_dosvwv: WideCharToMultiByte() failed: invalid flags"); case ERROR_INVALID_PARAMETER: croak("_dosvwv: WideCharToMultiByte() failed: invalid parameter"); default: croak("_dosvwv: WideCharToMultiByte() failed: error code %i",err); } } } svlen=(len==-1 ? 
strlen(p) : bytes); #else unsigned int bytes; if (len == -1) { len = utf16_len(wp); } if (len > 0) { ConversionResult ret; UTF16 *source_start = wp; UTF16 *source_end = source_start + len; UTF8 *target_start; UTF8 *target_end; /* Test conversion and find size UTF* of buffer we need */ ret = ConvertUTF16toUTF8((const UTF16 **)&source_start, source_end, NULL, NULL, strictConversion, &bytes); /*printf("Bytes Required = %d\n", bytes);*/ if (ret != conversionOK) { if (ret == sourceExhausted) { croak("_dosvwc: Partial character in input"); } else if (ret == targetExhausted) { croak("_dosvwc: target buffer exhausted"); } else if (ret == sourceIllegal) { croak("_dosvwc: malformed/illegal source sequence"); } else { croak("_dosvwc: unknown ConvertUTF16toUTF8 error"); } } Newz(0, p, bytes + 1, char); /* convert UTF16 to UTF8 */ target_start = p; target_end = p + bytes; source_start = (UTF16 *)wp; source_end = source_start + len; ret = ConvertUTF16toUTF8((const UTF16 **)&source_start, source_end, &target_start, target_end, strictConversion, &bytes); /*fprintf(stderr, "%s\n", p);*/ if (ret != conversionOK) { croak("_dosvwc: second call to ConvertUTF16toUTF8 failed (%d)", ret); } svlen = bytes; } else { svlen = 0; } #endif switch (mode) { case do_new: sv=newSVpvn(p,svlen); break; case do_cat: sv_catpvn_mg(sv,p,svlen); break; case do_set: sv_setpvn_mg(sv,p,svlen); break; default: croak("_dosvwv called with bad mode value"); } #ifdef sv_utf8_decode if (!sv_utf8_decode(sv)) { croak("Attempt to utf8 decode a non utf8 sequence"); } #else if (*p) { SvUTF8_on(sv); /*printf("Switching UTF8 on\n");*/ } else if (mode!=do_cat) { SvUTF8_off(sv); /* Don't switch off UTF8 just because we *APPENDED* an empty string! sv may still be UTF8. */ /*printf("Switching UTF8 off\n");*/ } #endif Safefree(p); return sv; } /* * Set the string value of an SV* to a representation of a UTF16 * value, * similar to sv_setpvn() and sv_setpv() * SV contains UTF-8 representation of wp, has UTF8-Flag on except for * empty strings * * wp is an array of wide characters without a termination character */ void sv_setwvn(pTHX_ SV * sv, UTF16 * wp, STRLEN len) { if (wp==NULL) { sv_setpvn(sv,NULL,len); } else if (len==0) { sv_setpvn(sv,"",0); } else { _dosvwv(aTHX_ sv,wp,len,do_set); } } SV *sv_newwvn(pTHX_ UTF16 * wp, STRLEN len) { SV *sv; /*printf("wp=%p, strlen=%d\n", wp, len);*/ if (wp==NULL) { sv = &PL_sv_undef; } else if (len==0) { sv = newSVpvn("",0); } else { sv = _dosvwv(aTHX_ NULL,wp,len,do_new); } return sv; } /* * Get a UTF16 * representation of a char * * The representation is a converted copy, so the result needs to be freed * usng WVfree(). * char * s == NULL is handled properly * * Does not handle byte arrays, only null-terminated strings. 
*/ UTF16 * WValloc(char * s) { UTF16 * buf=NULL; if (NULL!=s) { #ifdef WIN32 int widechars=MultiByteToWideChar(CP_UTF8,0,s,-1,NULL,0); Newz(0,buf,widechars+1,UTF16); if (widechars!=0) { MultiByteToWideChar(CP_UTF8,0,s,-1,buf,widechars); } #else /* !WIN32 */ unsigned int widechrs, bytes; size_t slen; ConversionResult ret; UTF8 *source_start, *source_end; UTF16 *target_start, *target_end; slen = strlen(s); /*printf("utf8 string \\%.20s\\ is %d bytes long\n", s, slen);*/ source_start = s; /* source_end needs to include NUL and be 1 past as ConvertUTF8toUTF17 loops while < source_end */ source_end = s + slen + 1; ret = ConvertUTF8toUTF16( (const UTF8 **)&source_start, source_end, NULL, NULL, strictConversion, &bytes); if (ret != conversionOK) { if (ret == sourceExhausted) { croak("WValloc: Partial character in input"); } else if (ret == targetExhausted) { croak("WValloc: target buffer exhausted"); } else if (ret == sourceIllegal) { croak("WValloc: malformed/illegal source sequence"); } else { croak("WValloc: unknown ConvertUTF16toUTF8 error"); } } /*printf("utf8 -> utf16 requires %d bytes\n", bytes);*/ widechrs = bytes / sizeof(UTF16); /*printf("Allocating %d wide chrs\n", widechrs);*/ Newz(0,buf,widechrs + 1,UTF16); if (widechrs != 0) { source_start = s; /* 1 after NUL because ConvertUTF8toUTF16 does while < end */ source_end = s + slen + 1; target_start = buf; /* in ConvertUTF8toUTF16 once target_end hit buf is exhausted */ target_end = buf + widechrs; /*printf("ss=%p se=%p ts=%p te=%p\n", source_start, source_end, target_start, target_end);*/ ret = ConvertUTF8toUTF16( (const UTF8 **)&source_start, source_end, &target_start, target_end, strictConversion, &bytes); if (ret != conversionOK) { croak("WValloc: second call to ConvertUTF8toUTF16 failed (%d)", ret); } /*printf("Second returned %d bytes\n", bytes);*/ } #endif /* WIN32 */ } return buf; } /* * Free a UTF16 * representation of a char * * Used to free the return values of WValloc() */ void WVfree(UTF16 * wp) { if (wp != NULL) Safefree(wp); } /* * Get a char * representation of a UTF16 * * The representation is a converted copy, so the result needs to be freed * using PVfree(). * wp == NULL is handled properly * * Does not handle byte arrays, only null-terminated strings. 
*/ char * PVallocW(UTF16 * wp) { char * p=NULL; if (wp!=NULL) { #ifdef WIN32 int bytes=WideCharToMultiByte( CP_UTF8, /* convert to UTF8 */ 0, /* no flags */ wp, /* wide chrs to convert */ -1, /* wp is null terminated */ NULL, /* no conversion output */ 0, /* return how many bytes we need */ NULL, /* default chr - must be NULL for UTF-8 */ NULL); /* was default chr used - must be NULL for UTF-8 */ if (bytes == 0) { DWORD err; err = GetLastError(); croak("WideCharToMultiByte() failed with %ld", err); } Newz(0,p,bytes,char); /* allocate "bytes" chars */ if (!WideCharToMultiByte(CP_UTF8,0,wp,-1,p,bytes,NULL,NULL)) { DWORD err; err = GetLastError(); croak("WideCharToMultiByte() failed with %ld, bytes=%d, chrs=%d", err, bytes, wcslen(wp)); } #else ConversionResult ret; UTF16 *source_start; UTF16 *source_end; unsigned int bytes; UTF8 *target_start; UTF8 *target_end; long len; if (wp != NULL) { len = utf16_len(wp); } source_start = (UTF16 *)wp; source_end = source_start + len; ret = ConvertUTF16toUTF8((const UTF16 **)&source_start, source_end, NULL, NULL, strictConversion, &bytes); if (ret != conversionOK) { if (ret == sourceExhausted) { croak("PVallocW: Partial character in input"); } else if (ret == targetExhausted) { croak("PVallocW: target buffer exhausted"); } else if (ret == sourceIllegal) { croak("PVallocW: malformed/illegal source sequence"); } else { croak("PVallocW: unknown ConvertUTF16toUTF8 error"); } } Newz(0,p,bytes,char); target_start = p; target_end = p + bytes; source_start = (UTF16 *)wp; source_end = source_start + len; ret = ConvertUTF16toUTF8((const UTF16 **)&source_start, source_end, &target_start, target_end, strictConversion, &bytes); if (ret != conversionOK) { croak("PVallocW: second call to ConvertUTF16toUTF8 failed (%d)", ret); } #endif } return p; } /* * Free a UTF16 * representation of a char * * Used to free the return value of PVallocW() * char * s == NULL is handled properly */ void PVfreeW(char * s) { if (s!=NULL) Safefree(s); } /* * Mutate an SV's PV INPLACE to contain UTF-16. Does not handle byte arrays, * only null-terminated strings. * Turns the UTF8 flag OFF unconditionally, because SV becomes a byte array * (for Perl). */ void SV_toWCHAR(pTHX_ SV * sv) { STRLEN len; UTF16 * wp; char * p; if (!SvOK(sv)) { /* warn("SV_toWCHAR called for undef"); */ return; } /* _force makes sure SV is only a string */ p=SvPVutf8_force(sv,len); /*printf("p=%p, strlen(p) = %d\n", p, strlen(p));*/ wp=WValloc(p); /* allocate wp containing utf16 copy of utf8 p */ len=utf16_len(wp); p=SvGROW(sv,sizeof(UTF16)*(1+len)); utf16_copy((UTF16 *)p,wp); SvCUR_set(sv,sizeof(UTF16)*len); WVfree(wp); SvPOK_only(sv); /* sv is nothing but a non-UTF8 string -- for Perl ;-) */ } /* change a UTF8 encoded SV to a wide chr string in place - see SV_toWCHAR */ void utf8sv_to_wcharsv(pTHX_ SV *sv) { #ifdef sv_utf8_decode sv_utf8_decode(sv); #else SvUTF8_on(sv); #endif SV_toWCHAR(aTHX_ sv); } static long utf16_len(UTF16 *wp) { long len = 0; if (!wp) return 0; while (*wp != 0) { wp++; len++; } return len; } static void utf16_copy(UTF16 *d, UTF16 *s) { while(*s) { /*printf("Copying %p %d\n", s, *s);*/ *d++ = *s++; } *d = 0; } #endif /* WITH_UNICODE */ DBD-ODBC-1.61/if_you_are_taking_over_this_code.txt0000644000175000017500000000465612250310263021405 0ustar martinmartinIf you are taking over the maintainership of DBD::ODBC - good luck. I don't want to say anything more that will put you off ;-) so honestly, good luck. 
However there are a few things I can say that might help you: o make sure you install Test::Pod, Test::Pod::Coverage and Test::Kwalitee as tests using those modules only get run when they are installed. If you don't install them you can bet someone else will (probably someone packaging Perl modules for a Linux distro) and you will have missed the failures. o when a new DBI is released get hold of it, get the latest dbivport.h and copy it into DBD::ODBC. o when a new Devel::PPPort is released get hold of it and copy the latest ppport.h into DBD::ODBC as dbipport.h then persuade someone looking after DBI to change DBIXS.h from #include "dbipport.h" to #include I never managed to get that done. o remember that when a user installs a new DBI he generally needs to recompile his DBDs - people will forget this and it can cause a load of problems. o a lot of people have worked on DBD::ODBC over the years and in all cases I know they were volunteers. This means it was not their day job and they did their best but sometimes took shortcuts. That sometimes means a patch was provided and it looked ok and seemed to solve a problem so it was committed and all was well even if the patch did not look so good. o there are a lot of people using old and buggy ODBC drivers (or ODBC driver managers) and DBD::ODBC has a lot of workarounds for them. I'd suggest that if you don't know something is definitely broken then don't change it. I wasted some time trying to trim things down only to find I broke older drivers and driver managers. o dbi-dev mailing list is your friend - use it. o Microsoft wrote the ODBC spec then handed it to X/Open but continue to change it without reference to X/Open. This will happen again. Be aware of it and live with it (it has just happended again with ODBC 3.8!) and it hit me hard when 32bit moved to 64bit and the spec changed over night. o at this time, unicode in ODBC NEEDS a unicode aware ODBC Driver i.e., it must have the wide SQLxxxW functions. People will tell you that the driver manager can "translate" between ANSI and WIDE APIs (or the driver can do UTF-8 etc) but it is nonesense e.g., unixODBC does an ok job of this but it does not work with bound columns, ODBC does not do UTF-8! DBD-ODBC-1.61/dbivport.h0000644000175000017500000000374012250310263014105 0ustar martinmartin/* dbivport.h Provides macros that enable greater portability between DBI versions. This file should be *copied* and included in driver distributions and #included into the source, after #include DBIXS.h New driver releases should include an updated copy of dbivport.h from the most recent DBI release. */ #ifndef DBI_VPORT_H #define DBI_VPORT_H #ifndef DBIh_SET_ERR_CHAR /* Emulate DBIh_SET_ERR_CHAR Only uses the err_i, errstr and state parameters. */ #define DBIh_SET_ERR_CHAR(h, imp_xxh, err_c, err_i, errstr, state, method) \ sv_setiv(DBIc_ERR(imp_xxh), err_i); \ (state) ? (void)sv_setpv(DBIc_STATE(imp_xxh), state) : (void)SvOK_off(DBIc_STATE(imp_xxh)); \ sv_setpv(DBIc_ERRSTR(imp_xxh), errstr) #endif #ifndef DBIcf_Executed #define DBIcf_Executed 0x080000 #endif #ifndef DBIc_TRACE_LEVEL_MASK #define DBIc_TRACE_LEVEL_MASK 0x0000000F #define DBIc_TRACE_FLAGS_MASK 0xFFFFFF00 #define DBIc_TRACE_SETTINGS(imp) (DBIc_DBISTATE(imp)->debug) #define DBIc_TRACE_LEVEL(imp) (DBIc_TRACE_SETTINGS(imp) & DBIc_TRACE_LEVEL_MASK) #define DBIc_TRACE_FLAGS(imp) (DBIc_TRACE_SETTINGS(imp) & DBIc_TRACE_FLAGS_MASK) /* DBIc_TRACE_MATCHES - true if s1 'matches' s2 (c.f. 
trace_msg()) DBIc_TRACE_MATCHES(foo, DBIc_TRACE_SETTINGS(imp)) */ #define DBIc_TRACE_MATCHES(s1, s2) \ ( ((s1 & DBIc_TRACE_LEVEL_MASK) >= (s2 & DBIc_TRACE_LEVEL_MASK)) \ || ((s1 & DBIc_TRACE_FLAGS_MASK) & (s2 & DBIc_TRACE_FLAGS_MASK)) ) /* DBIc_TRACE - true if flags match & DBI level>=flaglevel, or if DBI level>level DBIc_TRACE(imp, 0, 0, 4) = if level >= 4 DBIc_TRACE(imp, DBDtf_FOO, 2, 4) = if tracing DBDtf_FOO & level>=2 or level>=4 DBIc_TRACE(imp, DBDtf_FOO, 2, 0) = as above but never trace just due to level */ #define DBIc_TRACE(imp, flags, flaglevel, level) \ ( (flags && (DBIc_TRACE_FLAGS(imp) & flags) && (DBIc_TRACE_LEVEL(imp) >= flaglevel)) \ || (level && DBIc_TRACE_LEVEL(imp) >= level) ) #endif #endif /* !DBI_VPORT_H */ DBD-ODBC-1.61/dbdimp.c0000644000175000017500000105075513614770327013537 0ustar martinmartin/* * portions Copyright (c) 1994,1995,1996,1997 Tim Bunce * portions Copyright (c) 1997 Thomas K. Wenrich * portions Copyright (c) 1997-2001 Jeff Urlwin * portions Copyright (c) 2007-2013 Martin J. Evans * * You may distribute under the terms of either the GNU General Public * License or the Artistic License, as specified in the Perl README file. * */ /* * NOTES: * * o Trace levels 1 and 2 are reserved for DBI (see DBI::DBD) so don't * use them here in DBIc_TRACE_LEVEL tests. * "Trace Levels" in DBI defines trace levels as: * 0 - Trace disabled. * 1 - Trace DBI method calls returning with results or errors. * 2 - Trace method entry with parameters and returning with results. * 3 - As above, adding some high-level information from the driver * and some internal information from the DBI. * 4 - As above, adding more detailed information from the driver. * 5 to 15 - As above but with more and more obscure information. * * SV Manipulation Functions * http://perl.active-venture.com/pod/perlapi-svfunctions.html * Formatted Printing of IVs, UVs, and NVs * http://perldoc.perl.org/perlguts.html#Formatted-Printing-of-IVs,-UVs,-and-NVs * http://cpansearch.perl.org/src/RURBAN/illguts-0.44/index.html * Internal replacements for standard C library functions: * http://perldoc.perl.org/perlclib.html * http://search.cpan.org/dist/Devel-PPPort/PPPort.pm * * MS ODBC 64 bit: * http://msdn.microsoft.com/en-us/library/ms716287%28v=vs.85%29.aspx */ #include #define NEED_newRV_noinc #define NEED_sv_2pv_flags #define NEED_my_snprintf #include "ODBC.h" #if defined(WITH_UNICODE) # include "unicode_helper.h" #endif /* trap iODBC on Unicode builds */ #if defined(WITH_UNICODE) && (defined(_IODBCUNIX_H) || defined(_IODBCEXT_H)) #error DBD::ODBC will not run properly with iODBC in unicode mode as iODBC defines wide characters as being 4 bytes in size #endif /* DBI defines the following but not until 1.617 so we replicate here for now */ /* will remove when DBD::ODBC requires 1.617 or above */ #ifndef DBIf_TRACE_SQL # define DBIf_TRACE_SQL 0x00000100 #endif #ifndef DBIf_TRACE_CON # define DBIf_TRACE_CON 0x00000200 #endif #ifndef DBIf_TRACE_ENC # define DBIf_TRACE_ENC 0x00000400 #endif #ifndef DBIf_TRACE_DBD # define DBIf_TRACE_DBD 0x00000800 #endif #ifndef DBIf_TRACE_TXN # define DBIf_TRACE_TXN 0x000001000 #endif /* combined DBI trace connection and encoding flags with DBD::ODBC ones */ /* Historically DBD::ODBC had 2 flags before they were made DBI ones */ #define UNICODE_TRACING (0x02000000|DBIf_TRACE_ENC|DBIf_TRACE_DBD) #define CONNECTION_TRACING (0x04000000|DBIf_TRACE_CON|DBIf_TRACE_DBD) #define DBD_TRACING DBIf_TRACE_DBD #define TRANSACTION_TRACING (DBIf_TRACE_TXN|DBIf_TRACE_DBD) #define SQL_TRACING 
(DBIf_TRACE_SQL|DBIf_TRACE_DBD) #define TRACE0(a,b) PerlIO_printf(DBIc_LOGPIO(a), (b)) #define TRACE1(a,b,c) PerlIO_printf(DBIc_LOGPIO(a), (b), (c)) #define TRACE2(a,b,c,d) PerlIO_printf(DBIc_LOGPIO(a), (b), (c), (d)) #define TRACE3(a,b,c,d,e) PerlIO_printf(DBIc_LOGPIO(a), (b), (c), (d), (e)) /* An error return reserved for our internal use and should not clash with any ODBC error codes like SQL_ERROR, SQL_INVALID_HANDLE etc. It is used so we can call dbd_error but indicate there is no point in calling SQLError as the error is internal */ #define DBDODBC_INTERNAL_ERROR -999 static int taf_callback_wrapper ( void *handle, int type, int event); static int get_row_diag(SQLSMALLINT recno, imp_sth_t *imp_sth, char *state, SQLINTEGER *native, char *msg, size_t max_msg); static SQLSMALLINT default_parameter_type( char *why, imp_sth_t *imp_sth, phs_t *phs); static int post_connect(pTHX_ SV *dbh, imp_dbh_t *imp_dbh, SV *attr); static int set_odbc_version(pTHX_ SV *dbh, imp_dbh_t *imp_dbh, SV* attr); static const char *S_SqlTypeToString (SWORD sqltype); static const char *S_SqlCTypeToString (SWORD sqltype); static const char *cSqlTables = "SQLTables(%s,%s,%s,%s)"; static const char *cSqlPrimaryKeys = "SQLPrimaryKeys(%s,%s,%s)"; static const char *cSqlStatistics = "SQLStatistics(%s,%s,%s,%d,%d)"; static const char *cSqlForeignKeys = "SQLForeignKeys(%s,%s,%s,%s,%s,%s)"; static const char *cSqlColumns = "SQLColumns(%s,%s,%s,%s)"; static const char *cSqlGetTypeInfo = "SQLGetTypeInfo(%d)"; static SQLRETURN bind_columns(SV *h, imp_sth_t *imp_sth); static void AllODBCErrors(HENV henv, HDBC hdbc, HSTMT hstmt, int output, PerlIO *logfp); static int check_connection_active(pTHX_ SV *h); static int build_results(pTHX_ SV *sth, imp_sth_t *imp_sth, SV *dbh, imp_dbh_t *imp_dbh, RETCODE orc); static int rebind_param(pTHX_ SV *sth, imp_sth_t *imp_sth, imp_dbh_t *imp_dbh, phs_t *phs); static void get_param_type(SV *sth, imp_sth_t *imp_sth, imp_dbh_t *imp_dbh, phs_t *phs); static void check_for_unicode_param(imp_sth_t *imp_sth, phs_t *phs); /* Function to get the console window handle which we may use in SQLDriverConnect on WIndows */ #ifdef WIN32 static HWND GetConsoleHwnd(void); #endif int dbd_describe(SV *sth, imp_sth_t *imp_sth, int more); int dbd_db_login6_sv(SV *dbh, imp_dbh_t *imp_dbh, SV *dbname, SV *uid, SV *pwd, SV *attr); int dbd_db_login6(SV *dbh, imp_dbh_t *imp_dbh, char *dbname, char *uid, char *pwd, SV *attr); int dbd_st_finish(SV *sth, imp_sth_t *imp_sth); IV dbd_st_execute_iv(SV *sth, imp_sth_t *imp_sth); /* for sanity/ease of use with potentially null strings */ #define XXSAFECHAR(p) ((p) ? 
(p) : "(null)") /* unique value for db attrib that won't conflict with SQL types, just * increment by one if you are adding */ #define ODBC_IGNORE_NAMED_PLACEHOLDERS 0x8332 #define ODBC_DEFAULT_BIND_TYPE 0x8333 #define ODBC_ASYNC_EXEC 0x8334 #define ODBC_ERR_HANDLER 0x8335 #define ODBC_ROWCACHESIZE 0x8336 #define ODBC_ROWSINCACHE 0x8337 #define ODBC_FORCE_REBIND 0x8338 #define ODBC_EXEC_DIRECT 0x8339 #define ODBC_VERSION 0x833A #define ODBC_CURSORTYPE 0x833B #define ODBC_QUERY_TIMEOUT 0x833C #define ODBC_HAS_UNICODE 0x833D #define ODBC_PUTDATA_START 0x833E #define ODBC_OUTCON_STR 0x833F #define ODBC_COLUMN_DISPLAY_SIZE 0x8340 #define ODBC_UTF8_ON 0x8341 #define ODBC_FORCE_BIND_TYPE 0x8342 #define ODBC_DESCRIBE_PARAMETERS 0x8344 #define ODBC_DRIVER_COMPLETE 0x8345 #define ODBC_BATCH_SIZE 0x8346 #define ODBC_ARRAY_OPERATIONS 0x8347 #define ODBC_TAF_CALLBACK 0x8348 /* This is the bind type for parameters we fall back to if the bind_param method was not given a parameter type and SQLDescribeParam is not supported or failed. It also defines the point we switch from VARCHAR to LONGVARCHAR */ #ifdef WITH_UNICODE # define ODBC_BACKUP_BIND_TYPE_VALUE SQL_WVARCHAR # define ODBC_SWITCH_TO_LONGVARCHAR 2000 #else # define ODBC_BACKUP_BIND_TYPE_VALUE SQL_VARCHAR # define ODBC_SWITCH_TO_LONGVARCHAR 4000 #endif DBISTATE_DECLARE; void dbd_init(dbistate_t *dbistate) { dTHX; DBISTATE_INIT; PERL_UNUSED_ARG(dbistate); } static RETCODE odbc_set_query_timeout( imp_dbh_t *imp_dbh, HSTMT hstmt, UV odbc_timeout) { RETCODE rc; if (DBIc_TRACE(imp_dbh, DBD_TRACING, 0, 3)) { TRACE1(imp_dbh, " Set timeout to: %"UVuf"\n", odbc_timeout); } rc = SQLSetStmtAttr(hstmt,(SQLINTEGER)SQL_ATTR_QUERY_TIMEOUT, (SQLPOINTER)odbc_timeout,(SQLINTEGER)SQL_IS_INTEGER); if (!SQL_SUCCEEDED(rc)) { /* Some drivers get upset with this so we ignore errors and just trace the problem */ if (DBIc_TRACE(imp_dbh, DBD_TRACING, 0, 3)) TRACE1( imp_dbh, " Failed to set Statement ATTR Query Timeout to %"UVuf"\n", odbc_timeout); } return rc; } static void odbc_clear_result_set(SV *sth, imp_sth_t *imp_sth) { dTHX; SV *value; char *key; I32 keylen; if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 3)) { TRACE0(imp_sth, "odbc_clear_result_set\n"); } Safefree(imp_sth->fbh); Safefree(imp_sth->ColNames); Safefree(imp_sth->RowBuffer); /* dgood - Yikes! I don't want to go down to this level, */ /* but if I don't, it won't figure out that the */ /* number of columns have changed... */ if (DBIc_FIELDS_AV(imp_sth)) { sv_free((SV*)DBIc_FIELDS_AV(imp_sth)); DBIc_FIELDS_AV(imp_sth) = Nullav; } while ( (value = hv_iternextsv((HV*)SvRV(sth), &key, &keylen)) ) { if (strncmp(key, "NAME_", 5) == 0 || strncmp(key, "TYPE", 4) == 0 || strncmp(key, "PRECISION", 9) == 0 || strncmp(key, "SCALE", 5) == 0 || strncmp(key, "NULLABLE", 8) == 0) { (void)hv_delete((HV*)SvRV(sth), key, keylen, G_DISCARD); if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) { TRACE2(imp_sth, " ODBC_CLEAR_RESULTS '%s' => %s\n", key, neatsvpv(value,0)); } } } imp_sth->fbh = NULL; imp_sth->ColNames = NULL; imp_sth->RowBuffer = NULL; imp_sth->done_desc = 0; } static void odbc_handle_outparams(pTHX_ imp_sth_t *imp_sth, int debug) { int i = (imp_sth->out_params_av) ? 
AvFILL(imp_sth->out_params_av)+1 : 0; if (debug >= 3) TRACE1(imp_sth, " processing %d output parameters\n", i); while (--i >= 0) { phs_t *phs = (phs_t*)(void*)SvPVX(AvARRAY(imp_sth->out_params_av)[i]); SV *sv = phs->sv; if (debug >= 8) { TRACE2(imp_sth, " outparam %s, length:%ld\n", phs->name, (long)phs->strlen_or_ind); } /* phs->strlen_or_ind has been updated by ODBC to hold the length * of the result */ if (phs->strlen_or_ind != SQL_NULL_DATA) { /* * When ODBC fills an output parameter buffer, the size of the * data that were available is written into the memory location * provided by strlen_or_ind pointer argument during the * SQLBindParameter() call. * * If the number of bytes available exceeds the size of the output * buffer, ODBC will truncate the data such that it fits in the * available buffer. However, the strlen_or_ind will still reflect * the size of the data before it was truncated. * * This fact provides us a way to detect truncation on this particular * output parameter. Otherwise, the only way to detect truncation is * through a follow-up to a SQL_SUCCESS_WITH_INFO result. Such a call * cannot return enough information to state exactly where the * truncation occurred. */ SvPOK_only(sv); /* string, disable other OK bits */ if (phs->strlen_or_ind > phs->maxlen) { /* out param truncated */ SvCUR_set(sv, phs->maxlen); *SvEND(sv) = '\0'; /* null terminate */ if (debug >= 2) { PerlIO_printf( DBIc_LOGPIO(imp_sth), " outparam %s = '%s'\t(TRUNCATED from %ld to %ld)\n", phs->name, SvPV_nolen(sv), (long)phs->strlen_or_ind, (long)phs->maxlen); } } else { /* no truncation occurred */ SvCUR_set(sv, phs->strlen_or_ind); /* new length */ *SvEND(sv) = '\0'; /* null terminate */ if (phs->strlen_or_ind == phs->maxlen && (phs->sql_type == SQL_NUMERIC || phs->sql_type == SQL_DECIMAL || phs->sql_type == SQL_INTEGER || phs->sql_type == SQL_SMALLINT || phs->sql_type == SQL_FLOAT || phs->sql_type == SQL_REAL || phs->sql_type == SQL_DOUBLE)) { /* * fix up for oracle, which leaves the buffer at the size * requested, but only returns a few characters. The * intent is to truncate down to the actual number of * characters necessary. Need to find the first null * byte and set the length there. 
*/ char *pstart = SvPV_nolen(sv); char *p = pstart; while (*p != '\0') { p++; } if (debug >= 2) { PerlIO_printf( DBIc_LOGPIO(imp_sth), " outparam %s = '%s'\t(len %ld), is numeric end" " of buffer = %ld\n", phs->name, SvPV(sv,PL_na), (long)phs->strlen_or_ind, (long)(p - pstart)); } SvCUR_set(sv, p - pstart); } } } else { /* is NULL */ if (debug >= 2) TRACE1(imp_sth, " outparam %s = undef (NULL)\n", phs->name); (void)SvOK_off(phs->sv); } } } static int build_results(pTHX_ SV *sth, imp_sth_t *imp_sth, SV *dbh, imp_dbh_t *imp_dbh, RETCODE orc) { RETCODE rc; if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 3)) TRACE2(imp_sth, " build_results sql %p\t%s\n", imp_sth->hstmt, imp_sth->statement); /* init sth pointers */ imp_sth->fbh = NULL; imp_sth->ColNames = NULL; imp_sth->RowBuffer = NULL; imp_sth->RowCount = -1; imp_sth->odbc_column_display_size = imp_dbh->odbc_column_display_size; imp_sth->odbc_utf8_on = imp_dbh->odbc_utf8_on; if (!dbd_describe(sth, imp_sth, 0)) { if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 3)) { TRACE0(imp_sth, " !!dbd_describe failed, build_results...!\n"); } SQLFreeHandle(SQL_HANDLE_STMT, imp_sth->hstmt); imp_sth->hstmt = SQL_NULL_HSTMT; return 0; /* dbd_describe already called dbd_error() */ } if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 3)) { TRACE0(imp_sth, " dbd_describe build_results #2...!\n"); } /* TO_DO why is dbd_describe called again? */ if (dbd_describe(sth, imp_sth, 0) <= 0) { if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 3)) { TRACE0(imp_sth, " dbd_describe build_results #3...!\n"); } return 0; } DBIc_IMPSET_on(imp_sth); if (orc != SQL_NO_DATA) { imp_sth->RowCount = -1; rc = SQLRowCount(imp_sth->hstmt, &imp_sth->RowCount); dbd_error(sth, rc, "build_results/SQLRowCount"); if (rc != SQL_SUCCESS) { DBIc_ROW_COUNT(imp_sth) = -1; return -1; } DBIc_ROW_COUNT(imp_sth) = imp_sth->RowCount; } else { imp_sth->RowCount = 0; DBIc_ROW_COUNT(imp_sth) = 0; } DBIc_ACTIVE_on(imp_sth); /* XXX should only set for select ? */ return 1; } int odbc_discon_all(SV *drh, imp_drh_t *imp_drh) { dTHX; /* The disconnect_all concept is flawed and needs more work */ if (!PL_dirty && !SvTRUE(get_sv("DBI::PERL_ENDING",0))) { DBIh_SET_ERR_CHAR(drh, (imp_xxh_t*)imp_drh, Nullch, 1, "disconnect_all not implemented", Nullch, Nullch); return FALSE; } return FALSE; } /* error : <=(-2), ok row count : >=0, unknown count : (-1) */ SQLLEN dbd_db_execdirect(SV *dbh, SV *statement ) { dTHX; D_imp_dbh(dbh); SQLRETURN ret; /* SQLxxx return value */ SQLLEN rows; SQLHSTMT stmt; int dbh_active; if ((dbh_active = check_connection_active(aTHX_ dbh)) == 0) return 0; ret = SQLAllocHandle(SQL_HANDLE_STMT, imp_dbh->hdbc, &stmt ); if (!SQL_SUCCEEDED(ret)) { dbd_error( dbh, ret, "Statement allocation error" ); return(-2); } /* if odbc_query_timeout has been set, set it in the driver */ if (imp_dbh->odbc_query_timeout != -1) { ret = odbc_set_query_timeout(imp_dbh, stmt, imp_dbh->odbc_query_timeout); if (!SQL_SUCCEEDED(ret)) { dbd_error(dbh, ret, "execdirect set_query_timeout"); } /* don't fail if the query timeout can't be set. 
*/ } if (DBIc_TRACE(imp_dbh, SQL_TRACING, 0, 3)) { TRACE1(imp_dbh, " SQLExecDirect %s\n", SvPV_nolen(statement)); } #ifdef WITH_UNICODE if (SvOK(statement) && DO_UTF8(statement)) { SQLWCHAR *wsql; STRLEN wsql_len; SV *sql_copy; if (DBIc_TRACE(imp_dbh, UNICODE_TRACING, 0, 0)) /* odbcunicode */ TRACE0(imp_dbh, " Processing utf8 sql in unicode mode\n"); sql_copy = sv_mortalcopy(statement); SV_toWCHAR(aTHX_ sql_copy); wsql = (SQLWCHAR *)SvPV(sql_copy, wsql_len); ret = SQLExecDirectW(stmt, wsql, wsql_len / sizeof(SQLWCHAR)); } else { if (DBIc_TRACE(imp_dbh, UNICODE_TRACING, 0, 0)) /* odbcunicode */ TRACE0(imp_dbh, " Processing non utf8 sql in unicode mode\n"); ret = SQLExecDirect(stmt, (SQLCHAR *)SvPV_nolen(statement), SQL_NTS); } #else if (DBIc_TRACE(imp_dbh, UNICODE_TRACING, 0, 0)) /* odbcunicode */ TRACE0(imp_dbh, " Processing sql in non-unicode mode\n"); ret = SQLExecDirect(stmt, (SQLCHAR *)SvPV_nolen(statement), SQL_NTS); #endif if (DBIc_TRACE(imp_dbh, DBD_TRACING, 0, 3)) TRACE1(imp_dbh, " SQLExecDirect = %d\n", ret); if (!SQL_SUCCEEDED(ret) && ret != SQL_NO_DATA) { dbd_error2(dbh, ret, "Execute immediate failed", imp_dbh->henv, imp_dbh->hdbc, stmt ); rows = -2; /* error */ } else { if (ret == SQL_NO_DATA) { rows = 0; } else if (ret != SQL_SUCCESS) { dbd_error2(dbh, ret, "Execute immediate success with info", imp_dbh->henv, imp_dbh->hdbc, stmt ); } ret = SQLRowCount(stmt, &rows); if (!SQL_SUCCEEDED(ret)) { dbd_error( dbh, ret, "SQLRowCount failed" ); rows = -1; } } ret = SQLFreeHandle(SQL_HANDLE_STMT,stmt); if (!SQL_SUCCEEDED(ret)) { dbd_error2(dbh, ret, "Statement destruction error", imp_dbh->henv, imp_dbh->hdbc, stmt); } return rows; } void dbd_db_destroy(SV *dbh, imp_dbh_t *imp_dbh) { if (DBIc_ACTIVE(imp_dbh)) dbd_db_disconnect(dbh, imp_dbh); /* Nothing in imp_dbh to be freed */ DBIc_IMPSET_off(imp_dbh); if (DBIc_TRACE(imp_dbh, DBD_TRACING, 0, 8)) TRACE0(imp_dbh, " DBD::ODBC Disconnected!\n"); } /* * quick dumb function to handle case insensitivity for DSN= or DRIVER= * in DSN...note this is because strncmpi is not available on all * platforms using that name (VC++, Debian, etc most notably). * Note, also, strupr doesn't seem to have a standard name, either... 
*/ int dsnHasDriverOrDSN(char *dsn) { char upper_dsn[512]; char *cp = upper_dsn; strncpy(upper_dsn, dsn, sizeof(upper_dsn)-1); upper_dsn[sizeof(upper_dsn)-1] = '\0'; while (*cp != '\0') { *cp = toupper(*cp); cp++; /* see rt 79190 was a sequence point error*/ } return (strncmp(upper_dsn, "DSN=", 4) == 0 || strncmp(upper_dsn, "DRIVER=", 7) == 0); } int dsnHasUIDorPWD(char *dsn) { char upper_dsn[512]; char *cp = upper_dsn; strncpy(upper_dsn, dsn, sizeof(upper_dsn)-1); upper_dsn[sizeof(upper_dsn)-1] = '\0'; while (*cp != '\0') { *cp = toupper(*cp); cp++; /* see rt 79190 was a sequence point error*/ } return (strstr(upper_dsn, "UID=") != 0 || strstr(upper_dsn, "PWD=") != 0); } /************************************************************************/ /* */ /* dbd_db_login */ /* ============ */ /* */ /* NOTE: This is the old 5 argument version with no attribs */ /* */ /************************************************************************/ int dbd_db_login( SV *dbh, imp_dbh_t *imp_dbh, char *dbname, char *uid, char *pwd) { return dbd_db_login6(dbh, imp_dbh, dbname, uid, pwd, Nullsv); } /************************************************************************/ /* */ /* dbd_db_login6_sv */ /* ================ */ /* */ /* This API was introduced in DBI after 1.607 (subversion revision */ /* 11723) and is the same as dbd_db_login6 except the connection */ /* strings are SVs so we can detect unicode strings and call */ /* SQLDriveConnectW. */ /* */ /************************************************************************/ int dbd_db_login6_sv( SV *dbh, imp_dbh_t *imp_dbh, SV *dbname, SV *uid, SV *pwd, SV *attr) { dTHX; #ifndef WITH_UNICODE if (DBIc_TRACE(imp_dbh, CONNECTION_TRACING, 0, 0)) TRACE0(imp_dbh, "non-Unicode login6_sv\n"); return dbd_db_login6(dbh, imp_dbh, SvPV_nolen(dbname), (SvOK(uid) ? SvPV_nolen(uid) : NULL), (SvOK(pwd) ? 
SvPV_nolen(pwd) : NULL), attr); #else D_imp_drh_from_dbh; SQLRETURN rc; SV *wconstr; /* copy of connection string in wide chrs */ /* decoded connection string in wide characters and its length to work around an issue in older unixODBCs */ SQLWCHAR dc_constr[512]; STRLEN dc_constr_len; if (DBIc_TRACE(imp_dbh, CONNECTION_TRACING, 0, 0)) { TRACE2(imp_dbh, "Unicode login6 dbname=%s, uid=%s, pwd=xxxxx\n", SvPV_nolen(dbname), neatsvpv(uid, 0)); } imp_dbh->out_connect_string = Nullsv; if (!imp_drh->connects) { rc = SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, &imp_drh->henv); dbd_error(dbh, rc, "db_login6_sv/SQLAllocHandle(env)"); if (!SQL_SUCCEEDED(rc)) return 0; if (set_odbc_version(aTHX_ dbh, imp_dbh, attr) != 1) return 0; } imp_dbh->henv = imp_drh->henv; /* needed for dbd_error */ /* If odbc_trace_file set, set it in ODBC */ { SV **attr_sv; char *file; if ((attr_sv = DBD_ATTRIB_GET_SVP(attr, "odbc_trace_file", (I32)strlen("odbc_trace_file"))) != NULL) { if (SvPOK(*attr_sv)) { file = SvPV_nolen(*attr_sv); rc = SQLSetConnectAttr(NULL, SQL_ATTR_TRACEFILE, file, strlen(file)); if (!SQL_SUCCEEDED(rc)) { warn("Failed to set trace file"); } } } } /* If odbc_trace enabled, turn ODBC tracing on */ { UV dc = 0; SV **svp; DBD_ATTRIB_GET_IV(attr, "odbc_trace", 10, svp, dc); if (svp && dc) { rc = SQLSetConnectAttr(NULL, SQL_ATTR_TRACE, (SQLPOINTER)SQL_OPT_TRACE_ON, 0); if (!SQL_SUCCEEDED(rc)) { warn("Failed to enable tracing"); } } } rc = SQLAllocHandle(SQL_HANDLE_DBC, imp_drh->henv, &imp_dbh->hdbc); if (!SQL_SUCCEEDED(rc)) { dbd_error(dbh, rc, "db_login6_sv/SQLAllocHandle(dbc)"); if (imp_drh->connects == 0) { SQLFreeHandle(SQL_HANDLE_ENV, imp_drh->henv); imp_drh->henv = SQL_NULL_HENV; imp_dbh->henv = SQL_NULL_HENV; /* needed for dbd_error */ } return 0; } /* If odbc_driver_complete specified we need to grab it */ { UV dc = 0; SV **svp; DBD_ATTRIB_GET_IV(attr, "odbc_driver_complete", 20, svp, dc); if (svp && dc) { imp_dbh->odbc_driver_complete = 1; } else { imp_dbh->odbc_driver_complete = 0; } } /* If the connection string is too long to pass to SQLConnect or it contains DSN or DRIVER, we've little choice but to call SQLDriverConnect and need to tag the uid/pwd on the end of the connection string (unless they already exist). */ if ((SvCUR(dbname) > SQL_MAX_DSN_LENGTH || /* too big for SQLConnect */ dsnHasDriverOrDSN(SvPV_nolen(dbname))) && !dsnHasUIDorPWD(SvPV_nolen(dbname))) { if (SvOK(uid)) { sv_catpv(dbname, ";UID="); sv_catsv(dbname, uid); } if (SvOK(pwd)) { sv_catpv(dbname, ";PWD="); sv_catsv(dbname, pwd); } sv_catpv(dbname, ";"); /*sv_catpvf(dbname, ";UID=%s;PWD=%s;", SvPV_nolen(uid), SvPV_nolen(pwd));*/ /* if (DBIc_TRACE(imp_dbh, CONNECTION_TRACING, 0, 0)) TRACE1(imp_dbh, "Now using dbname = %s\n", SvPV_nolen(dbname)); */ } if (DBIc_TRACE(imp_dbh, CONNECTION_TRACING, 0, 0)) TRACE2(imp_dbh, " SQLDriverConnect '%s', '%s', 'xxxx'\n", SvPV_nolen(dbname), neatsvpv(uid, 0)); wconstr = sv_mortalcopy(dbname); utf8sv_to_wcharsv(aTHX_ wconstr); /* The following is to work around a bug in SQLDriverConnectW in unixODBC which in at least 2.2.11 (and probably up to 2.2.13 official release [not pre-release]) core dumps if the wide connection string does not end in a 0 (even though it should not matter as we pass the length. 
*/ { char *p; memset(dc_constr, '\0', sizeof(dc_constr)); p = SvPV(wconstr, dc_constr_len); if (dc_constr_len > (sizeof(dc_constr) - 2)) { croak("Cannot process connection string - too long"); } memcpy(dc_constr, p, dc_constr_len); } { SQLWCHAR wout_str[512]; SQLSMALLINT wout_str_len; #ifdef WIN32 if (imp_dbh->odbc_driver_complete) { rc = SQLDriverConnectW(imp_dbh->hdbc, GetConsoleHwnd(), /* no hwnd */ dc_constr, (SQLSMALLINT)(dc_constr_len / sizeof(SQLWCHAR)), wout_str, sizeof(wout_str) / sizeof(wout_str[0]), &wout_str_len, SQL_DRIVER_COMPLETE); } else { #endif rc = SQLDriverConnectW(imp_dbh->hdbc, 0, /* no hwnd */ dc_constr, (SQLSMALLINT)(dc_constr_len / sizeof(SQLWCHAR)), wout_str, sizeof(wout_str) / sizeof(wout_str[0]), &wout_str_len, SQL_DRIVER_NOPROMPT); #ifdef WIN32 } #endif if (SQL_SUCCEEDED(rc)) { imp_dbh->out_connect_string = sv_newwvn(aTHX_ wout_str, wout_str_len); /* if (DBIc_TRACE(imp_dbh, CONNECTION_TRACING, 0, 0)) TRACE1(imp_dbh, "Out connection string: %s\n", SvPV_nolen(imp_dbh->out_connect_string)); */ } } if (!SQL_SUCCEEDED(rc)) { SV *wuid, *wpwd; SQLWCHAR *wuidp, *wpwdp; SQLSMALLINT uid_len, pwd_len; if (DBIc_TRACE(imp_dbh, CONNECTION_TRACING, 0, 0)) TRACE0(imp_dbh, " SQLDriverConnectW failed:\n"); /* * Added code for DBD::ODBC 0.39 to help return a better * error code in the case where the user is using a * DSN-less connection and the dbname doesn't look like a * true DSN. */ if (SvCUR(dbname) > SQL_MAX_DSN_LENGTH || dsnHasDriverOrDSN(SvPV_nolen(dbname))) { /* must be DSN= or some "direct" connection attributes, * probably best to error here and give the user a real * error code because the SQLConnect call could hide the * real problem. */ dbd_error(dbh, rc, "db_login6sv/SQLDriverConnectW"); SQLFreeHandle(SQL_HANDLE_DBC, imp_dbh->hdbc); if (imp_drh->connects == 0) { SQLFreeHandle(SQL_HANDLE_ENV, imp_drh->henv); imp_drh->henv = SQL_NULL_HENV; imp_dbh->henv = SQL_NULL_HENV; } return 0; } /* ok, the DSN is short, so let's try to use it to connect * and quietly take all error messages */ AllODBCErrors(imp_dbh->henv, imp_dbh->hdbc, 0, 0, DBIc_LOGPIO(imp_dbh)); if (DBIc_TRACE(imp_dbh, CONNECTION_TRACING, 0, 0)) TRACE2(imp_dbh, " SQLConnect '%s', '%s'\n", neatsvpv(dbname, 0), neatsvpv(uid, 0)); wconstr = sv_mortalcopy(dbname); utf8sv_to_wcharsv(aTHX_ wconstr); if (SvOK(uid)) { wuid = sv_mortalcopy(uid); utf8sv_to_wcharsv(aTHX_ wuid); wuidp = (SQLWCHAR *)SvPV_nolen(wuid); uid_len = SvCUR(wuid) / sizeof(SQLWCHAR); } else { wuidp = NULL; uid_len = 0; } if (SvOK(pwd)) { wpwd = sv_mortalcopy(pwd); utf8sv_to_wcharsv(aTHX_ wpwd); wpwdp = (SQLWCHAR *)SvPV_nolen(wpwd); pwd_len = SvCUR(wpwd) / sizeof(SQLWCHAR); } else { wpwdp = NULL; pwd_len = 0; } rc = SQLConnectW(imp_dbh->hdbc, (SQLWCHAR *)SvPV_nolen(wconstr), (SQLSMALLINT)(SvCUR(wconstr) / sizeof(SQLWCHAR)), wuidp, uid_len, wpwdp, pwd_len); } if (!SQL_SUCCEEDED(rc)) { dbd_error(dbh, rc, "db_login6sv/SQLConnectW"); SQLFreeHandle(SQL_HANDLE_DBC, imp_dbh->hdbc); imp_dbh->hdbc = SQL_NULL_HDBC; if (imp_drh->connects == 0) { SQLFreeHandle(SQL_HANDLE_ENV, imp_drh->henv); imp_drh->henv = SQL_NULL_HENV; imp_dbh->henv = SQL_NULL_HENV; } return 0; } else if (rc == SQL_SUCCESS_WITH_INFO) { dbd_error(dbh, rc, "db_login6sv/SQLConnectW"); } if (post_connect(aTHX_ dbh, imp_dbh, attr) != 1) return 0; imp_drh->connects++; DBIc_IMPSET_on(imp_dbh); /* imp_dbh set up now */ DBIc_ACTIVE_on(imp_dbh); /* call disconnect before freeing */ return 1; #endif /* WITH_UNICODE */ } /************************************************************************/ /* */ /* 
dbd_db_login6 */ /* ============= */ /* */ /* A newer version of the dbd_db_login API with the additional attr as */ /* the sixth argument. Once everyone upgrades to at least */ /* DBI 1.60X (where X > 7) this API won't get called anymore since */ /* dbd_db_login6_sv will be favoured. */ /* */ /* NOTE: I had hoped to make dbd_db_login6_sv support Unicode and */ /* dbd_db_login6 to not support Unicode but as no one (except me) has */ /* a DBI which supports dbd_db_login6_sv and unixODBC REQUIRES us to */ /* call SQLDriverConnectW if we are going to call other SQLXXXW */ /* functions later I've got no choice but to convert the ASCII strings */ /* passed to dbd_db_login6 to wide characters when DBD::ODBC is built */ /* for Unicode. */ /* */ /************************************************************************/ int dbd_db_login6( SV *dbh, imp_dbh_t *imp_dbh, char *dbname, char *uid, char *pwd, SV *attr) { dTHX; D_imp_drh_from_dbh; RETCODE rc; char dbname_local[512]; #ifdef WITH_UNICODE SQLWCHAR wconstr[512]; STRLEN wconstr_len; unsigned int i; #endif if (DBIc_TRACE(imp_dbh, CONNECTION_TRACING, 0, 0)) TRACE0(imp_dbh, "dbd_db_login6\n"); if (!imp_drh->connects) { rc = SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, &imp_drh->henv); dbd_error(dbh, rc, "db_login6/SQLAllocHandle(env)"); if (!SQL_SUCCEEDED(rc)) return 0; if (set_odbc_version(aTHX_ dbh, imp_dbh, attr) != 1) return 0; } imp_dbh->henv = imp_drh->henv; /* needed for dbd_error */ /* If odbc_trace_file set, set it in ODBC */ { SV **attr_sv; char *file; if ((attr_sv = DBD_ATTRIB_GET_SVP(attr, "odbc_trace_file", (I32)strlen("odbc_trace_file"))) != NULL) { if (SvPOK(*attr_sv)) { file = SvPV_nolen(*attr_sv); rc = SQLSetConnectAttr(NULL, SQL_ATTR_TRACEFILE, file, strlen(file)); if (!SQL_SUCCEEDED(rc)) { warn("Failed to set trace file"); } } } } /* If odbc_trace enabled, turn ODBC tracing on */ { UV dc = 0; SV **svp; DBD_ATTRIB_GET_IV(attr, "odbc_trace", 10, svp, dc); if (svp && dc) { rc = SQLSetConnectAttr(NULL, SQL_ATTR_TRACE, (SQLPOINTER)SQL_OPT_TRACE_ON, 0); if (!SQL_SUCCEEDED(rc)) { warn("Failed to enable tracing"); } } } imp_dbh->out_connect_string = NULL; rc = SQLAllocHandle(SQL_HANDLE_DBC, imp_drh->henv, &imp_dbh->hdbc); if (!SQL_SUCCEEDED(rc)) { dbd_error(dbh, rc, "db_login6/SQLAllocHandle(dbc)"); if (imp_drh->connects == 0) { SQLFreeHandle(SQL_HANDLE_ENV, imp_drh->henv); imp_drh->henv = SQL_NULL_HENV; imp_dbh->henv = SQL_NULL_HENV; /* needed for dbd_error */ } return 0; } #ifndef DBD_ODBC_NO_SQLDRIVERCONNECT /* If the connection string is too long to pass to SQLConnect or it contains DSN or DRIVER, we've little choice to but to call SQLDriverConnect and need to tag the uid/pwd on the end of the connection string (unless they already exist). */ if ((strlen(dbname) > SQL_MAX_DSN_LENGTH || dsnHasDriverOrDSN(dbname)) && !dsnHasUIDorPWD(dbname)) { if ((strlen(dbname) + (uid ? strlen(uid) : 0) + (pwd ? strlen(pwd) : 0) + 12) > sizeof(dbname_local)) { croak("Connection string too long"); } strcpy(dbname_local, dbname); if (uid) { strcat(dbname_local, ";UID="); strcat(dbname_local, uid); } if (pwd) { strcat(dbname_local, ";PWD="); strcat(dbname_local, pwd); } dbname = dbname_local; } if (DBIc_TRACE(imp_dbh, CONNECTION_TRACING, 0, 0)) TRACE2(imp_dbh, " SQLDriverConnect '%s', '%s', 'xxxx'\n", dbname, (uid ? 
uid : "")); # ifdef WITH_UNICODE if (strlen(dbname) > (sizeof(wconstr) / sizeof(wconstr[0]))) { croak("Connection string too big to convert to wide characters"); } /* The following is a massive simplification assuming only 7-bit ASCII is ever passed to dbd_db_login6 */ for (i = 0; i < strlen(dbname); i++) { wconstr[i] = dbname[i]; } wconstr[i] = 0; wconstr_len = i; { SQLWCHAR wout_str[512]; SQLSMALLINT wout_str_len; rc = SQLDriverConnectW(imp_dbh->hdbc, 0, /* no hwnd */ wconstr, (SQLSMALLINT)wconstr_len, wout_str, sizeof(wout_str) / sizeof(wout_str[0]), &wout_str_len, SQL_DRIVER_NOPROMPT); if (SQL_SUCCEEDED(rc)) { imp_dbh->out_connect_string = sv_newwvn(aTHX_ wout_str, wout_str_len); if (DBIc_TRACE(imp_dbh, CONNECTION_TRACING, 0, 0)) TRACE1(imp_dbh, "Out connection string: %s\n", SvPV_nolen(imp_dbh->out_connect_string)); } } # else /* WITH_UNICODE */ { char out_str[512]; SQLSMALLINT out_str_len; /* Work around a bug in mdbtools where the out connection string length can sometimes be unset. We set it to a ridiculous value and if it remains we know mdbtools did not return it. */ out_str_len = 9999; rc = SQLDriverConnect(imp_dbh->hdbc, 0, /* no hwnd */ dbname, (SQLSMALLINT)strlen(dbname), out_str, sizeof(out_str), &out_str_len, SQL_DRIVER_NOPROMPT); if (SQL_SUCCEEDED(rc)) { if (out_str_len == 9999) { imp_dbh->out_connect_string = newSVpv("", 0); } else { imp_dbh->out_connect_string = newSVpv(out_str, out_str_len); } if (DBIc_TRACE(imp_dbh, CONNECTION_TRACING, 0, 0)) TRACE1(imp_dbh, "Out connection string: %s\n", SvPV_nolen(imp_dbh->out_connect_string)); } } # endif /* WITH_UNICODE */ #else /* if we are using something that can not handle SQLDriverconnect, * then set rc to a not OK state and we'll fall back on SQLConnect */ rc = SQL_ERROR; #endif if (!SQL_SUCCEEDED(rc)) { if (DBIc_TRACE(imp_dbh, DBD_TRACING, 0, 4)) { #ifdef DBD_ODBC_NO_SQLDRIVERCONNECT TRACE0(imp_dbh, " !SQLDriverConnect unsupported.\n"); #else TRACE0(imp_dbh, " SQLDriverConnect failed:\n"); #endif } #ifndef DBD_ODBC_NO_SQLDRIVERCONNECT /* * Added code for DBD::ODBC 0.39 to help return a better * error code in the case where the user is using a * DSN-less connection and the dbname doesn't look like a * true DSN. */ if (strlen(dbname) > SQL_MAX_DSN_LENGTH || dsnHasDriverOrDSN(dbname)) { /* must be DSN= or some "direct" connection attributes, * probably best to error here and give the user a real * error code because the SQLConnect call could hide the * real problem. */ dbd_error(dbh, rc, "db_login/SQLConnect"); SQLFreeHandle(SQL_HANDLE_DBC, imp_dbh->hdbc); if (imp_drh->connects == 0) { SQLFreeHandle(SQL_HANDLE_ENV, imp_drh->henv); imp_drh->henv = SQL_NULL_HENV; imp_dbh->henv = SQL_NULL_HENV; } return 0; } /* ok, the DSN is short, so let's try to use it to connect * and quietly take all error messages */ AllODBCErrors(imp_dbh->henv, imp_dbh->hdbc, 0, 0, DBIc_LOGPIO(imp_dbh)); #endif /* DriverConnect supported */ if (DBIc_TRACE(imp_dbh, CONNECTION_TRACING, 0, 0)) TRACE2(imp_dbh, " SQLConnect '%s', '%s'\n", dbname, (uid ? 
uid : "")); #ifdef WITH_UNICODE { SQLWCHAR wuid[100], wpwd[100]; SQLSMALLINT uid_len, pwd_len; SQLWCHAR *wuidp, *wpwdp; if (uid) { for (i = 0; i < strlen(uid); i++) { wuid[i] = uid[i]; } wuid[i] = 0; wuidp = wuid; uid_len = strlen(uid); } else { wuidp = NULL; uid_len = 0; } if (pwd) { for (i = 0; i < strlen(pwd); i++) { wpwd[i] = pwd[i]; } wpwd[i] = 0; wpwdp = wpwd; pwd_len = strlen(pwd); } else { wpwdp = NULL; pwd_len = 0; } for (i = 0; i < strlen(dbname); i++) { wconstr[i] = dbname[i]; } wconstr[i] = 0; wconstr_len = i; rc = SQLConnectW(imp_dbh->hdbc, wconstr, wconstr_len, wuidp, uid_len, wpwdp, pwd_len); } #else rc = SQLConnect(imp_dbh->hdbc, dbname, (SQLSMALLINT)strlen(dbname), uid, (SQLSMALLINT)(uid ? strlen(uid) : 0), pwd, (SQLSMALLINT)(pwd ? strlen(pwd) : 0)); #endif } if (!SQL_SUCCEEDED(rc)) { dbd_error(dbh, rc, "db_login6/SQLConnect"); SQLFreeHandle(SQL_HANDLE_DBC, imp_dbh->hdbc); if (imp_drh->connects == 0) { SQLFreeHandle(SQL_HANDLE_ENV, imp_drh->henv); imp_drh->henv = SQL_NULL_HENV; imp_dbh->henv = SQL_NULL_HENV; } return 0; } else if (rc == SQL_SUCCESS_WITH_INFO) { dbd_error(dbh, rc, "db_login6/SQLConnect"); } if (post_connect(aTHX_ dbh, imp_dbh, attr) != 1) return 0; imp_drh->connects++; DBIc_IMPSET_on(imp_dbh); /* imp_dbh set up now */ DBIc_ACTIVE_on(imp_dbh); /* call disconnect before freeing */ return 1; } int dbd_db_disconnect(SV *dbh, imp_dbh_t *imp_dbh) { dTHX; RETCODE rc; D_imp_drh_from_dbh; SQLUINTEGER autoCommit = SQL_AUTOCOMMIT_OFF; /* We assume that disconnect will always work */ /* since most errors imply already disconnected. */ DBIc_ACTIVE_off(imp_dbh); if (imp_dbh->out_connect_string) { SvREFCNT_dec(imp_dbh->out_connect_string); } rc = SQLGetConnectAttr( imp_dbh->hdbc, SQL_ATTR_AUTOCOMMIT, &autoCommit, SQL_IS_UINTEGER, 0); if (!SQL_SUCCEEDED(rc)) { /* quietly handle a problem with SQLGetConnectAttr() */ AllODBCErrors(imp_dbh->henv, imp_dbh->hdbc, 0, DBIc_TRACE(imp_dbh, DBD_TRACING, 0, 4), DBIc_LOGPIO(imp_dbh)); } rc = SQLDisconnect(imp_dbh->hdbc); if (!SQL_SUCCEEDED(rc)) { char state[SQL_SQLSTATE_SIZE+1]; (void)SQLGetDiagField(SQL_HANDLE_DBC, imp_dbh->hdbc, 1, SQL_DIAG_SQLSTATE, (SQLCHAR *)state, sizeof(state), NULL); if (strcmp(state, "25000") == 0) { if (DBIc_TRACE(imp_dbh, TRANSACTION_TRACING, 0, 3)) TRACE0(imp_dbh, "SQLDisconnect, Transaction in progress\n"); DBIh_SET_ERR_CHAR( dbh, (imp_xxh_t*)imp_dbh, "0" /* warning state */, 1, "Disconnect with transaction in progress - rolling back", state, Nullch); (void)dbd_db_rollback(dbh, imp_dbh); rc = SQLDisconnect(imp_dbh->hdbc); } if (!SQL_SUCCEEDED(rc)) { dbd_error(dbh, rc, "db_disconnect/SQLDisconnect"); /* if disconnect fails, fall through. Probably not disconnected */ } } if (DBIc_TRACE(imp_dbh, CONNECTION_TRACING, 0, 0)) TRACE1(imp_dbh, "SQLDisconnect=%d\n", rc); SQLFreeHandle(SQL_HANDLE_DBC, imp_dbh->hdbc); imp_dbh->hdbc = SQL_NULL_HDBC; imp_drh->connects--; strcpy(imp_dbh->odbc_dbms_name, "disconnect"); if (imp_drh->connects == 0) { SQLFreeHandle(SQL_HANDLE_ENV, imp_drh->henv); imp_drh->henv = SQL_NULL_HENV; imp_dbh->henv = SQL_NULL_HENV; } /* We don't free imp_dbh since a reference still exists */ /* The DESTROY method is the only one to 'free' memory. */ /* Note that statement objects may still exist for this dbh! 
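       (Per the ODBC spec SQLDisconnect also drops any statements still open
       on the connection, so those hstmt handles are not usable afterwards.)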
*/ return 1; } int dbd_db_commit(SV *dbh, imp_dbh_t *imp_dbh) { RETCODE rc; rc = SQLEndTran(SQL_HANDLE_DBC, imp_dbh->hdbc, SQL_COMMIT); if (!SQL_SUCCEEDED(rc)) { dbd_error(dbh, rc, "db_commit/SQLEndTran"); return 0; } /* support for DBI 1.20 begin_work */ if (DBIc_has(imp_dbh, DBIcf_BegunWork)) { /* reset autocommit */ rc = SQLSetConnectAttr( imp_dbh->hdbc, SQL_ATTR_AUTOCOMMIT, (SQLPOINTER)SQL_AUTOCOMMIT_ON, 0); DBIc_on(imp_dbh,DBIcf_AutoCommit); DBIc_off(imp_dbh,DBIcf_BegunWork); } return 1; } int dbd_db_rollback(SV *dbh, imp_dbh_t *imp_dbh) { RETCODE rc; rc = SQLEndTran(SQL_HANDLE_DBC, imp_dbh->hdbc, SQL_ROLLBACK); if (!SQL_SUCCEEDED(rc)) { dbd_error(dbh, rc, "db_rollback/SQLEndTran"); return 0; } /* support for DBI 1.20 begin_work */ if (DBIc_has(imp_dbh, DBIcf_BegunWork)) { /* reset autocommit */ rc = SQLSetConnectAttr( imp_dbh->hdbc, SQL_ATTR_AUTOCOMMIT, (SQLPOINTER)SQL_AUTOCOMMIT_ON, 0); DBIc_on(imp_dbh,DBIcf_AutoCommit); DBIc_off(imp_dbh,DBIcf_BegunWork); } return 1; } void dbd_error2( SV *h, RETCODE err_rc, char *what, HENV henv, HDBC hdbc, HSTMT hstmt) { dTHX; D_imp_xxh(h); int error_found = 0; /* * It's a shame to have to add all this stuff with imp_dbh and * imp_sth, but imp_dbh is needed to get the odbc_err_handler * and imp_sth is needed to get imp_dbh. */ struct imp_dbh_st *imp_dbh = NULL; struct imp_sth_st *imp_sth = NULL; if (err_rc == SQL_SUCCESS) return; if (DBIc_TRACE(imp_xxh, DBD_TRACING, 0, 4) && (err_rc != SQL_SUCCESS)) { PerlIO_printf( DBIc_LOGPIO(imp_xxh), " !!dbd_error2(err_rc=%d, what=%s, handles=(%p,%p,%p)\n", err_rc, (what ? what : "null"), henv, hdbc, hstmt); } switch(DBIc_TYPE(imp_xxh)) { case DBIt_ST: imp_sth = (struct imp_sth_st *)(imp_xxh); imp_dbh = (struct imp_dbh_st *)(DBIc_PARENT_COM(imp_sth)); break; case DBIt_DB: imp_dbh = (struct imp_dbh_st *)(imp_xxh); break; default: croak("panic: dbd_error2 on bad handle type"); } while(henv != SQL_NULL_HENV) { SQLCHAR sqlstate[SQL_SQLSTATE_SIZE+1]; /* * ODBC spec says ErrorMsg must not be greater than * SQL_MAX_MESSAGE_LENGTH but we concatenate a little * on the end later (e.g. sql state) so make room for more. */ SQLCHAR ErrorMsg[SQL_MAX_MESSAGE_LENGTH+512]; SQLSMALLINT ErrorMsgLen; SQLINTEGER NativeError; RETCODE rc = 0; /* TBD: 3.0 update */ /* It is important we check for DBDODBC_INTERNAL_ERROR first so if we issue an internal error AND there are ODBC diagnostics, ours come first */ while(err_rc == DBDODBC_INTERNAL_ERROR || SQL_SUCCEEDED(rc=SQLError( henv, hdbc, hstmt, sqlstate, &NativeError, ErrorMsg, sizeof(ErrorMsg)-1, &ErrorMsgLen))) { error_found = 1; if (err_rc == DBDODBC_INTERNAL_ERROR) { strcpy(ErrorMsg, what); strcpy(sqlstate, "HY000"); NativeError = 1; err_rc = SQL_ERROR; } else { ErrorMsg[ErrorMsgLen] = '\0'; sqlstate[SQL_SQLSTATE_SIZE] = '\0'; } if (DBIc_TRACE(imp_dbh, DBD_TRACING, 0, 3)) { PerlIO_printf(DBIc_LOGPIO(imp_dbh), " !SQLError(%p,%p,%p) = " "(%s, %ld, %s)\n", henv, hdbc, hstmt, sqlstate, (long)NativeError, ErrorMsg); } /* * If there's an error handler, run it and see what it returns... * (lifted from DBD:Sybase 0.21) */ if(imp_dbh->odbc_err_handler) { dSP; int retval, count; ENTER; SAVETMPS; PUSHMARK(sp); if (DBIc_TRACE(imp_dbh, DBD_TRACING, 0, 3)) TRACE0(imp_dbh, " Calling error handler\n"); /* * Here are the args to the error handler routine: * 1. sqlstate (string) * 2. ErrorMsg (string) * 3. NativeError (integer) * That's it for now... 
*/ XPUSHs(sv_2mortal(newSVpv(sqlstate, 0))); XPUSHs(sv_2mortal(newSVpv(ErrorMsg, 0))); XPUSHs(sv_2mortal(newSViv(NativeError))); XPUSHs(sv_2mortal(newSViv(err_rc))); PUTBACK; if((count = call_sv(imp_dbh->odbc_err_handler, G_SCALAR)) != 1) croak("An error handler can't return a LIST."); SPAGAIN; retval = POPi; PUTBACK; FREETMPS; LEAVE; /* If the called sub returns 0 then ignore this error */ if(retval == 0) { if (DBIc_TRACE(imp_dbh, DBD_TRACING, 0, 3)) TRACE0(imp_dbh, " Handler caused error to be ignored\n"); continue; } } strcat(ErrorMsg, " (SQL-"); strcat(ErrorMsg, sqlstate); strcat(ErrorMsg, ")"); if (SQL_SUCCEEDED(err_rc)) { DBIh_SET_ERR_CHAR(h, imp_xxh, "" /* information state */, 1, ErrorMsg, sqlstate, Nullch); } else { DBIh_SET_ERR_CHAR(h, imp_xxh, Nullch, 1, ErrorMsg, sqlstate, Nullch); } continue; } if (rc != SQL_NO_DATA_FOUND) { /* should never happen */ if (DBIc_TRACE(imp_xxh, DBD_TRACING, 0, 3)) TRACE1(imp_dbh, " !!SQLError returned %d unexpectedly.\n", rc); if (!PL_dirty) { /* not in global destruction */ DBIh_SET_ERR_CHAR( h, imp_xxh, Nullch, 1, " Unable to fetch information about the error", "IM008", Nullch); } } /* climb up the tree each time round the loop */ if (hstmt != SQL_NULL_HSTMT) hstmt = SQL_NULL_HSTMT; else if (hdbc != SQL_NULL_HDBC) hdbc = SQL_NULL_HDBC; else henv = SQL_NULL_HENV; /* done the top */ } /* some broken drivers may return an error and then not provide an error message */ if (!error_found && (err_rc != SQL_NO_DATA_FOUND)) { /* DON'T REMOVE "No error found" from the string below people rely on it as the state was IM008 and I changed it to HY000 */ if (DBIc_TRACE(imp_xxh, DBD_TRACING, 0, 3)) TRACE1(imp_dbh, " ** No error found %d **\n", err_rc); DBIh_SET_ERR_CHAR( h, imp_xxh, Nullch, 1, " Unable to fetch information about the error", "HY000", Nullch); } } /*------------------------------------------------------------ empties entire ODBC error queue. ------------------------------------------------------------*/ void dbd_error(SV *h, RETCODE err_rc, char *what) { dTHX; D_imp_xxh(h); struct imp_dbh_st *imp_dbh = NULL; struct imp_sth_st *imp_sth = NULL; HSTMT hstmt = SQL_NULL_HSTMT; switch(DBIc_TYPE(imp_xxh)) { case DBIt_ST: imp_sth = (struct imp_sth_st *)(imp_xxh); imp_dbh = (struct imp_dbh_st *)(DBIc_PARENT_COM(imp_sth)); hstmt = imp_sth->hstmt; break; case DBIt_DB: imp_dbh = (struct imp_dbh_st *)(imp_xxh); break; default: croak("panic: dbd_error on bad handle type"); } /* * If status is SQL_SUCCESS, there's no error, so we can just return. * There may be status or other non-error messsages though. * We want those messages if the debug level is set to at least 3. * If an error handler is installed, let it decide what messages * should or shouldn't be reported. */ if ((err_rc == SQL_SUCCESS) && !DBIc_TRACE(imp_dbh, DBD_TRACING, 0, 3) && !imp_dbh->odbc_err_handler) return; dbd_error2(h, err_rc, what, imp_dbh->henv, imp_dbh->hdbc, hstmt); } /*------------------------------------------------------------------------- dbd_preparse: - scan for placeholders (? and :xx style) and convert them to ?. - builds translation table to convert positional parameters of the execute() call to :nn type placeholders. We need two data structures to translate this stuff: - a hash to convert positional parameters to placeholders - an array, representing the actual '?' query parameters. %param = (name1=>plh1, name2=>plh2, ..., name_n=>plh_n) # @qm_param = (\$param{'name1'}, \$param{'name2'}, ...) 
-------------------------------------------------------------------------*/ void dbd_preparse(imp_sth_t *imp_sth, char *statement) { dTHX; enum STATES {DEFAULT, LITERAL, COMMENT, LINE_COMMENT}; enum STATES state = DEFAULT; enum STYLES { STYLE_NONE, /* no style 0 */ STYLE_NUMBER, /* :N 1 */ STYLE_NAME, /* :name 2*/ STYLE_NORMAL /* ? 3 */ }; char literal_ch = '\0'; char *src, *dest; /* input and output SQL */ phs_t phs_tpl; int idx=0; /* parameter number */ enum STYLES style = STYLE_NONE; /* type of parameter */ enum STYLES laststyle = STYLE_NONE; /* last type of parameter */ imp_sth->statement = (char*)safemalloc(strlen(statement)+1); /* initialize phs ready to be cloned per placeholder */ memset(&phs_tpl, 0, sizeof(phs_tpl)); phs_tpl.value_type = SQL_C_CHAR; phs_tpl.sv = &PL_sv_undef; src = statement; dest = imp_sth->statement; if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 5)) { TRACE1(imp_sth, " ignore named placeholders = %d\n", imp_sth->odbc_ignore_named_placeholders); } while(*src) { enum STATES next_state = state; switch (state) { case DEFAULT: { if ((*src == '\'') || (*src == '"')) { literal_ch = *src; /* save quote chr */ next_state = LITERAL; } else if ((*src == '/') && (*(src + 1) == '*')) { next_state = COMMENT; /* in comment */ } else if ((*src == '-') && (*(src + 1) == '-')) { next_state = LINE_COMMENT; /* in line comment */ } else if ((*src == '?') || (*src == ':')) { STRLEN namelen; char name[256]; /* current named parameter */ SV **svpp; char ch; ch = *src++; if (ch == '?') { /* X/Open standard */ idx++; my_snprintf(name, sizeof(name), "%d", idx); *dest++ = ch; style = STYLE_NORMAL; } else if (isDIGIT(*src)) { /* ':1' */ char *p = name; *dest++ = '?'; idx = atoi(src); while(isDIGIT(*src)) *p++ = *src++; *p = 0; style = STYLE_NUMBER; if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 5)) TRACE1(imp_sth, " found numbered parameter = %s\n", name); } else if (!imp_sth->odbc_ignore_named_placeholders && isALNUM(*src)) { /* ':foo' is valid, only if we are not ignoring named parameters */ char *p = name; idx++; *dest++ = '?'; while(isALNUM(*src)) /* includes '_' */ *p++ = *src++; *p = 0; style = STYLE_NAME; if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 5)) TRACE1(imp_sth, " found named parameter = %s\n", name); } else { /* perhaps ':=' PL/SQL construct */ *dest++ = ch; continue; } *dest = '\0'; /* handy for debugging */ if (laststyle && style != laststyle) croak("Can't mix placeholder styles (%d/%d)", style,laststyle); laststyle = style; if (imp_sth->all_params_hv == NULL) imp_sth->all_params_hv = newHV(); namelen = strlen(name); svpp = hv_fetch(imp_sth->all_params_hv, name, (I32)namelen, 0); if (svpp == NULL) { SV *phs_sv; phs_t *phs; if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 5)) TRACE2(imp_sth, " creating new parameter key %s, index %d\n", name, idx); /* create SV holding the placeholder */ phs_sv = newSVpv((char*)&phs_tpl, sizeof(phs_tpl)+namelen+1); phs = (phs_t*)SvPVX(phs_sv); strcpy(phs->name, name); phs->idx = idx; /* store placeholder to all_params_hv */ svpp = hv_store(imp_sth->all_params_hv, name, (I32)namelen, phs_sv, 0); } else { if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 5)) TRACE1(imp_sth, " parameter key %s already exists\n", name); croak("DBD::ODBC does not yet support binding a named parameter more than once\n"); } break; } *dest++ = *src++; break; } case LITERAL: { if (*src == literal_ch) { next_state = DEFAULT; } *dest++ = *src++; break; } case COMMENT: { if ((*(src - 1) == '*') && (*src == '/')) { next_state = DEFAULT; } *dest++ = *src++; break; } case LINE_COMMENT: { if (*src == '\n') { 
next_state = DEFAULT; } *dest++ = *src++; break; } } state = next_state; } *dest = '\0'; if (imp_sth->all_params_hv) { DBIc_NUM_PARAMS(imp_sth) = (int)HvKEYS(imp_sth->all_params_hv); if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) TRACE1(imp_sth, " dbd_preparse scanned %d distinct placeholders\n", (int)DBIc_NUM_PARAMS(imp_sth)); } } int dbd_st_tables( SV *dbh, SV *sth, SV *catalog, SV *schema, SV *table, SV *table_type) { dTHX; D_imp_dbh(dbh); D_imp_sth(sth); RETCODE rc; int dbh_active; size_t max_stmt_len; char *acatalog = NULL; char *aschema = NULL; char *atable = NULL; char *atype = NULL; imp_sth->henv = imp_dbh->henv; imp_sth->hdbc = imp_dbh->hdbc; imp_sth->done_desc = 0; if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 3)) PerlIO_printf(DBIc_LOGPIO(imp_sth), "dbd_st_tables(%s,%s,%s,%s)\n", SvOK(catalog) ? SvPV_nolen(catalog) : "undef", (schema && SvOK(schema)) ? SvPV_nolen(schema) : "undef", (table && SvOK(table)) ? SvPV_nolen(table) : "undef", (table_type && SvOK(table_type)) ? SvPV_nolen(table_type) : "undef"); if ((dbh_active = check_connection_active(aTHX_ dbh)) == 0) return 0; rc = SQLAllocHandle(SQL_HANDLE_STMT, imp_dbh->hdbc, &imp_sth->hstmt); if (rc != SQL_SUCCESS) { dbd_error(sth, rc, "st_tables/SQLAllocHandle(stmt)"); return 0; } if (SvOK(catalog)) acatalog = SvPV_nolen(catalog); if (!imp_dbh->catalogs_supported) { acatalog = NULL; *catalog = PL_sv_undef; } if (SvOK(schema)) aschema = SvPV_nolen(schema); if (!imp_dbh->schema_usage) { aschema = NULL; *schema = PL_sv_undef; } if (SvOK(table)) atable = SvPV_nolen(table); if (SvOK(table_type)) atype = SvPV_nolen(table_type); max_stmt_len = strlen(cSqlTables)+ strlen(XXSAFECHAR(acatalog)) + strlen(XXSAFECHAR(aschema)) + strlen(XXSAFECHAR(atable)) + strlen(XXSAFECHAR(atype))+1; imp_sth->statement = (char *)safemalloc(max_stmt_len); my_snprintf(imp_sth->statement, max_stmt_len, cSqlTables, XXSAFECHAR(acatalog), XXSAFECHAR(aschema), XXSAFECHAR(atable), XXSAFECHAR(atype)); #ifdef WITH_UNICODE { SQLWCHAR *wcatalog = NULL; SQLWCHAR *wschema = NULL; SQLWCHAR *wtable = NULL; SQLWCHAR *wtype = NULL; STRLEN wlen; SV *copy; if (SvOK(catalog)) { /*printf("CATALOG OK %"IVdf" /%s/\n", SvCUR(catalog), SvPV_nolen(catalog));*/ copy = sv_mortalcopy(catalog); SV_toWCHAR(aTHX_ copy); wcatalog = (SQLWCHAR *)SvPV(copy, wlen); } if (SvOK(schema)) { copy = sv_mortalcopy(schema); SV_toWCHAR(aTHX_ copy); wschema = (SQLWCHAR *)SvPV(copy, wlen); } if (SvOK(table)) { copy = sv_mortalcopy(table); SV_toWCHAR(aTHX_ copy); wtable = (SQLWCHAR *)SvPV(copy, wlen); } if (SvOK(table_type)) { copy = sv_mortalcopy(table_type); SV_toWCHAR(aTHX_ copy); wtype = (SQLWCHAR *)SvPV(copy, wlen); } /* printf("wcatalog = %p\n", wcatalog); for (i = 0; i < 10; i++) { printf("%d\n", wcatalog[i]); } */ rc = SQLTablesW(imp_sth->hstmt, wcatalog ? wcatalog : NULL, SQL_NTS, wschema ? wschema : NULL, SQL_NTS, wtable ? wtable : NULL, SQL_NTS, wtype ? wtype : NULL, SQL_NTS /* type (view, table, etc) */ ); } #else { rc = SQLTables(imp_sth->hstmt, acatalog ? acatalog : NULL, SQL_NTS, aschema ? aschema : NULL, SQL_NTS, atable ? atable : NULL, SQL_NTS, atype ? atype : NULL, SQL_NTS /* type (view, table, etc) */ ); } #endif /* WITH_UNICODE */ if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) TRACE2(imp_dbh, " SQLTables=%d (type=%s)\n", rc, atype ? 
atype : "(null)"); dbd_error(sth, rc, "st_tables/SQLTables"); if (!SQL_SUCCEEDED(rc)) { SQLFreeHandle(SQL_HANDLE_STMT,imp_sth->hstmt); imp_sth->hstmt = SQL_NULL_HSTMT; return 0; } return build_results(aTHX_ sth, imp_sth, dbh, imp_dbh, rc); } #ifdef OLD_ONE_BEFORE_SCALARS int dbd_st_tables( SV *dbh, SV *sth, char *catalog, char *schema, char *table, char *table_type) { dTHX; D_imp_dbh(dbh); D_imp_sth(sth); RETCODE rc; int dbh_active; size_t max_stmt_len; imp_sth->henv = imp_dbh->henv; imp_sth->hdbc = imp_dbh->hdbc; imp_sth->done_desc = 0; if ((dbh_active = check_connection_active(aTHX_ dbh)) == 0) return 0; rc = SQLAllocHandle(SQL_HANDLE_STMT, imp_dbh->hdbc, &imp_sth->hstmt); if (rc != SQL_SUCCESS) { dbd_error(sth, rc, "st_tables/SQLAllocHandle(stmt)"); return 0; } max_stmt_len = strlen(cSqlTables)+ strlen(XXSAFECHAR(catalog)) + strlen(XXSAFECHAR(schema)) + strlen(XXSAFECHAR(table)) + strlen(XXSAFECHAR(table_type))+1; imp_sth->statement = (char *)safemalloc(max_stmt_len); my_snprintf(imp_sth->statement, max_stmt_len, cSqlTables, XXSAFECHAR(catalog), XXSAFECHAR(schema), XXSAFECHAR(table), XXSAFECHAR(table_type)); rc = SQLTables(imp_sth->hstmt, (catalog && *catalog) ? catalog : 0, SQL_NTS, (schema && *schema) ? schema : 0, SQL_NTS, (table && *table) ? table : 0, SQL_NTS, table_type && *table_type ? table_type : 0, SQL_NTS /* type (view, table, etc) */ ); if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) TRACE2(imp_dbh, " Tables result %d (%s)\n", rc, table_type ? table_type : "(null)"); dbd_error(sth, rc, "st_tables/SQLTables"); if (!SQL_SUCCEEDED(rc)) { SQLFreeHandle(SQL_HANDLE_STMT,imp_sth->hstmt); imp_sth->hstmt = SQL_NULL_HSTMT; return 0; } return build_results(aTHX_ sth, imp_sth, dbh, imp_dbh, rc); } #endif /* OLD_ONE_BEFORE_SCALARS */ int dbd_st_primary_keys( SV *dbh, SV *sth, char *catalog, char *schema, char *table) { dTHX; D_imp_dbh(dbh); D_imp_sth(sth); RETCODE rc; int dbh_active; size_t max_stmt_len; imp_sth->henv = imp_dbh->henv; imp_sth->hdbc = imp_dbh->hdbc; imp_sth->done_desc = 0; if ((dbh_active = check_connection_active(aTHX_ dbh)) == 0) return 0; rc = SQLAllocHandle(SQL_HANDLE_STMT, imp_dbh->hdbc, &imp_sth->hstmt); if (rc != SQL_SUCCESS) { dbd_error(sth, rc, "odbc_db_primary_key_info/SQLAllocHandle(stmt)"); return 0; } /* just for sanity, later. Any internals that may rely on this (including */ /* debugging) will have valid data */ max_stmt_len = strlen(cSqlPrimaryKeys)+ strlen(XXSAFECHAR(catalog))+ strlen(XXSAFECHAR(schema))+ strlen(XXSAFECHAR(table))+1; imp_sth->statement = (char *)safemalloc(max_stmt_len); my_snprintf(imp_sth->statement, max_stmt_len, cSqlPrimaryKeys, XXSAFECHAR(catalog), XXSAFECHAR(schema), XXSAFECHAR(table)); rc = SQLPrimaryKeys(imp_sth->hstmt, (catalog && *catalog) ? catalog : 0, SQL_NTS, (schema && *schema) ? schema : 0, SQL_NTS, (table && *table) ? 
table : 0, SQL_NTS); if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) PerlIO_printf( DBIc_LOGPIO(imp_dbh), " SQLPrimaryKeys call: cat = %s, schema = %s, table = %s\n", XXSAFECHAR(catalog), XXSAFECHAR(schema), XXSAFECHAR(table)); dbd_error(sth, rc, "st_primary_key_info/SQLPrimaryKeys"); if (!SQL_SUCCEEDED(rc)) { SQLFreeHandle(SQL_HANDLE_STMT,imp_sth->hstmt); imp_sth->hstmt = SQL_NULL_HSTMT; return 0; } return build_results(aTHX_ sth, imp_sth, dbh, imp_dbh, rc); } int dbd_st_statistics( SV *dbh, SV *sth, char *catalog, char *schema, char *table, int unique, int quick) { dTHX; D_imp_dbh(dbh); D_imp_sth(sth); RETCODE rc; int dbh_active; SQLUSMALLINT odbc_unique; SQLUSMALLINT odbc_quick; size_t max_stmt_len; imp_sth->henv = imp_dbh->henv; imp_sth->hdbc = imp_dbh->hdbc; imp_sth->done_desc = 0; if ((dbh_active = check_connection_active(aTHX_ dbh)) == 0) return 0; rc = SQLAllocHandle(SQL_HANDLE_STMT, imp_dbh->hdbc, &imp_sth->hstmt); if (rc != SQL_SUCCESS) { dbd_error(sth, rc, "odbc_db_primary_key_info/SQLAllocHandle(stmt)"); return 0; } odbc_unique = (unique ? SQL_INDEX_UNIQUE : SQL_INDEX_ALL); odbc_quick = (quick ? SQL_QUICK : SQL_ENSURE); /* just for sanity, later. Any internals that may rely on this (including */ /* debugging) will have valid data */ max_stmt_len = strlen(cSqlStatistics)+ strlen(XXSAFECHAR(catalog))+ strlen(XXSAFECHAR(schema))+ strlen(XXSAFECHAR(table))+1; imp_sth->statement = (char *)safemalloc(max_stmt_len); my_snprintf(imp_sth->statement, max_stmt_len, cSqlStatistics, XXSAFECHAR(catalog), XXSAFECHAR(schema), XXSAFECHAR(table), unique, quick); rc = SQLStatistics(imp_sth->hstmt, (catalog && *catalog) ? catalog : 0, SQL_NTS, (schema && *schema) ? schema : 0, SQL_NTS, (table && *table) ? table : 0, SQL_NTS, odbc_unique, odbc_quick); if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) { PerlIO_printf( DBIc_LOGPIO(imp_dbh), " SQLStatistics call: cat = %s, schema = %s, table = %s" ", unique=%d, quick = %d\n", XXSAFECHAR(catalog), XXSAFECHAR(schema), XXSAFECHAR(table), odbc_unique, odbc_quick); } dbd_error(sth, rc, "st_statistics/SQLStatistics"); if (!SQL_SUCCEEDED(rc)) { SQLFreeHandle(SQL_HANDLE_STMT,imp_sth->hstmt); imp_sth->hstmt = SQL_NULL_HSTMT; return 0; } return build_results(aTHX_ sth, imp_sth, dbh, imp_dbh, rc); } /************************************************************************/ /* */ /* odbc_st_prepare */ /* =============== */ /* */ /* dbd_st_prepare is the old API which is now replaced with */ /* dbd_st_prepare_sv (taking a perl scalar) so this is now: */ /* */ /* a) just a wrapper around dbd_st_prepare_sv and */ /* b) not used - see ODBC.c */ /* */ /************************************************************************/ int odbc_st_prepare( SV *sth, imp_sth_t *imp_sth, char *statement, SV *attribs) { dTHX; SV *sql; sql = sv_newmortal(); sv_setpvn(sql, statement, strlen(statement)); return dbd_st_prepare_sv(sth, imp_sth, sql, attribs); } /************************************************************************/ /* */ /* odbc_st_prepare_sv */ /* ================== */ /* */ /* dbd_st_prepare_sv is the newer version of dbd_st_prepare taking a */ /* a perl scalar for the sql statement instead of a char* so it may be */ /* unicode */ /* */ /************************************************************************/ int odbc_st_prepare_sv( SV *sth, imp_sth_t *imp_sth, SV *statement, SV *attribs) { dTHX; D_imp_dbh_from_sth; RETCODE rc; int dbh_active; char *sql; sql = SvPV_nolen(statement); imp_sth->done_desc = 0; imp_sth->henv = imp_dbh->henv; imp_sth->hdbc = imp_dbh->hdbc; /* 
inherit from connection */ imp_sth->odbc_ignore_named_placeholders = imp_dbh->odbc_ignore_named_placeholders; imp_sth->odbc_default_bind_type = imp_dbh->odbc_default_bind_type; imp_sth->odbc_force_bind_type = imp_dbh->odbc_force_bind_type; imp_sth->odbc_force_rebind = imp_dbh->odbc_force_rebind; imp_sth->odbc_query_timeout = imp_dbh->odbc_query_timeout; imp_sth->odbc_putdata_start = imp_dbh->odbc_putdata_start; imp_sth->odbc_column_display_size = imp_dbh->odbc_column_display_size; imp_sth->odbc_utf8_on = imp_dbh->odbc_utf8_on; imp_sth->odbc_exec_direct = imp_dbh->odbc_exec_direct; imp_sth->odbc_describe_parameters = imp_dbh->odbc_describe_parameters; imp_sth->odbc_batch_size = imp_dbh->odbc_batch_size; imp_sth->odbc_array_operations = imp_dbh->odbc_array_operations; imp_sth->param_status_array = NULL; if (DBIc_TRACE(imp_dbh, DBD_TRACING, 0, 5)) { TRACE1(imp_dbh, " initializing sth query timeout to %ld\n", (long)imp_dbh->odbc_query_timeout); } if ((dbh_active = check_connection_active(aTHX_ sth)) == 0) return 0; rc = SQLAllocHandle(SQL_HANDLE_STMT, imp_dbh->hdbc, &imp_sth->hstmt); if (!SQL_SUCCEEDED(rc)) { dbd_error(sth, rc, "st_prepare/SQLAllocHandle(stmt)"); return 0; } { /* * allow setting of odbc_execdirect in prepare() or overriding */ SV **attr_sv; /* if the attribute is there, let it override what the default * value from the dbh is (set above). * NOTE: * There are unfortunately two possible attributes because of an early * typo in DBD::ODBC which we keep for backwards compatibility. */ if ((attr_sv = DBD_ATTRIB_GET_SVP(attribs, "odbc_execdirect", (I32)strlen("odbc_execdirect"))) != NULL) { imp_sth->odbc_exec_direct = SvIV(*attr_sv) != 0; } if ((attr_sv = DBD_ATTRIB_GET_SVP(attribs, "odbc_exec_direct", (I32)strlen("odbc_exec_direct"))) != NULL) { imp_sth->odbc_exec_direct = SvIV(*attr_sv) != 0; } } { /* * allow setting of odbc_describe_parameters in prepare() or overriding */ SV **attr_sv; /* if the attribute is there, let it override what the default * value from the dbh is (set above). 
*/ if ((attr_sv = DBD_ATTRIB_GET_SVP( attribs, "odbc_describe_parameters", (I32)strlen("odbc_describe_parameters"))) != NULL) { imp_sth->odbc_describe_parameters = SvIV(*attr_sv) != 0; } } { /* MS SQL Server query notification */ SV **attr_sv; if ((attr_sv = DBD_ATTRIB_GET_SVP( attribs, "odbc_qn_msgtxt", (I32)strlen("odbc_qn_msgtxt"))) != NULL) { rc = SQLSetStmtAttr(imp_sth->hstmt, 1234 /*SQL_SOPT_SS_QUERYNOTIFICATION_MSGTEXT*/, (SQLPOINTER)SvPV_nolen(*attr_sv), SQL_NTS); if (!SQL_SUCCEEDED(rc)) { dbd_error(sth, rc, "SQLSetStmtAttr(QUERYNOTIFICATION_MSGTXT)"); SQLFreeHandle(SQL_HANDLE_STMT, imp_sth->hstmt); imp_sth->hstmt = SQL_NULL_HSTMT; return 0; } } if ((attr_sv = DBD_ATTRIB_GET_SVP( attribs, "odbc_qn_options", (I32)strlen("odbc_qn_options"))) != NULL) { rc = SQLSetStmtAttr(imp_sth->hstmt, 1235 /*SQL_SOPT_SS_QUERYNOTIFICATION_OPTIONS*/, (SQLPOINTER)SvPV_nolen(*attr_sv), SQL_NTS); if (!SQL_SUCCEEDED(rc)) { dbd_error(sth, rc, "SQLSetStmtAttr(QUERYNOTIFICATION_OPTIONS)"); SQLFreeHandle(SQL_HANDLE_STMT, imp_sth->hstmt); imp_sth->hstmt = SQL_NULL_HSTMT; return 0; } } if ((attr_sv = DBD_ATTRIB_GET_SVP( attribs, "odbc_qn_timeout", (I32)strlen("odbc_qn_timeout"))) != NULL) { rc = SQLSetStmtAttr(imp_sth->hstmt, 1233 /*SQL_SOPT_SS_QUERYNOTIFICATION_TIMEOUT*/, (SQLPOINTER)SvIV(*attr_sv), SQL_NTS); if (!SQL_SUCCEEDED(rc)) { dbd_error(sth, rc, "SQLSetStmtAttr(QUERYNOTIFICATION_TIMEOUT)"); SQLFreeHandle(SQL_HANDLE_STMT, imp_sth->hstmt); imp_sth->hstmt = SQL_NULL_HSTMT; return 0; } } } /* scan statement for '?', ':1' and/or ':foo' style placeholders */ dbd_preparse(imp_sth, sql); /* Hold this statement for subsequent call of dbd_execute */ if (!imp_sth->odbc_exec_direct) { if (DBIc_TRACE(imp_dbh, SQL_TRACING, 0, 3)) { TRACE1(imp_dbh, " SQLPrepare %s\n", imp_sth->statement); } #ifdef WITH_UNICODE if (SvOK(statement) && DO_UTF8(statement)) { SQLWCHAR *wsql; STRLEN wsql_len; SV *sql_copy; if (DBIc_TRACE(imp_dbh, UNICODE_TRACING, 0, 0)) /* odbcunicode */ TRACE0(imp_dbh, " Processing utf8 sql in unicode mode for SQLPrepareW\n"); sql_copy = sv_newmortal(); sv_setpv(sql_copy, imp_sth->statement); #ifdef sv_utf8_decode sv_utf8_decode(sql_copy); #else SvUTF8_on(sql_copy); #endif SV_toWCHAR(aTHX_ sql_copy); wsql = (SQLWCHAR *)SvPV(sql_copy, wsql_len); rc = SQLPrepareW(imp_sth->hstmt, wsql, wsql_len / sizeof(SQLWCHAR)); } else { if (DBIc_TRACE(imp_dbh, UNICODE_TRACING, 0, 0)) /* odbcunicode */ TRACE0(imp_dbh, " Processing non-utf8 sql in unicode mode\n"); rc = SQLPrepare(imp_sth->hstmt, imp_sth->statement, SQL_NTS); } #else /* !WITH_UNICODE */ if (DBIc_TRACE(imp_dbh, UNICODE_TRACING, 0, 0)) /* odbcunicode */ TRACE0(imp_dbh, " Processing sql in non-unicode mode for SQLPrepare\n"); rc = SQLPrepare(imp_sth->hstmt, imp_sth->statement, SQL_NTS); #endif if (DBIc_TRACE(imp_dbh, DBD_TRACING, 0, 3)) TRACE1(imp_dbh, " SQLPrepare = %d\n", rc); if (!SQL_SUCCEEDED(rc)) { dbd_error(sth, rc, "st_prepare/SQLPrepare"); SQLFreeHandle(SQL_HANDLE_STMT, imp_sth->hstmt); imp_sth->hstmt = SQL_NULL_HSTMT; return 0; } } else if (DBIc_TRACE(imp_dbh, DBD_TRACING, 0, 3)) { TRACE1(imp_dbh, " odbc_exec_direct=1, statement (%s) " "held for later exec\n", imp_sth->statement); } /* init sth pointers */ imp_sth->henv = imp_dbh->henv; imp_sth->hdbc = imp_dbh->hdbc; imp_sth->fbh = NULL; imp_sth->ColNames = NULL; imp_sth->RowBuffer = NULL; imp_sth->RowCount = -1; /* * If odbc_async_exec is set and odbc_async_type is SQL_AM_STATEMENT, * we need to set the SQL_ATTR_ASYNC_ENABLE attribute. 
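     * SQL_AM_STATEMENT means the driver advertises per-statement
     * asynchronous support, so the attribute is applied to each statement
     * handle here rather than once on the connection handle.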
*/ if (imp_dbh->odbc_async_exec && imp_dbh->odbc_async_type == SQL_AM_STATEMENT){ rc = SQLSetStmtAttr(imp_sth->hstmt, SQL_ATTR_ASYNC_ENABLE, (SQLPOINTER) SQL_ASYNC_ENABLE_ON, SQL_IS_UINTEGER); if (!SQL_SUCCEEDED(rc)) { dbd_error(sth, rc, "st_prepare/SQLSetStmtAttr"); SQLFreeHandle(SQL_HANDLE_STMT, imp_sth->hstmt); imp_sth->hstmt = SQL_NULL_HSTMT; return 0; } } /* * If odbc_query_timeout is set (not -1) * we need to set the SQL_ATTR_QUERY_TIMEOUT */ if (imp_sth->odbc_query_timeout != -1){ odbc_set_query_timeout(imp_dbh, imp_sth->hstmt, imp_sth->odbc_query_timeout); if (!SQL_SUCCEEDED(rc)) { dbd_error(sth, rc, "set_query_timeout"); } /* don't fail if the query timeout can't be set. */ } DBIc_IMPSET_on(imp_sth); return 1; } /* Given SQL type return string description - only used in debug output */ static const char *S_SqlTypeToString (SWORD sqltype) { switch(sqltype) { case SQL_CHAR: return "CHAR"; case SQL_NUMERIC: return "NUMERIC"; case SQL_DECIMAL: return "DECIMAL"; case SQL_INTEGER: return "INTEGER"; case SQL_SMALLINT: return "SMALLINT"; case SQL_FLOAT: return "FLOAT"; case SQL_REAL: return "REAL"; case SQL_DOUBLE: return "DOUBLE"; case SQL_VARCHAR: return "VARCHAR"; #ifdef SQL_WCHAR case SQL_WCHAR: return "UNICODE CHAR"; #endif #ifdef SQL_WVARCHAR /* added for SQLServer 7 ntext type 2/24/2000 */ case SQL_WVARCHAR: return "UNICODE VARCHAR"; #endif #ifdef SQL_WLONGVARCHAR case SQL_WLONGVARCHAR: return "UNICODE LONG VARCHAR"; #endif case SQL_DATE: return "DATE"; case SQL_TYPE_DATE: return "DATE"; case SQL_TIME: return "TIME"; case SQL_TYPE_TIME: return "TIME"; case SQL_TIMESTAMP: return "TIMESTAMP"; case SQL_TYPE_TIMESTAMP: return "TIMESTAMP"; case SQL_LONGVARCHAR: return "LONG VARCHAR"; case SQL_BINARY: return "BINARY"; case SQL_VARBINARY: return "VARBINARY"; case SQL_LONGVARBINARY: return "LONG VARBINARY"; case SQL_BIGINT: return "BIGINT"; case SQL_TINYINT: return "TINYINT"; case SQL_BIT: return "BIT"; case MS_SQLS_XML_TYPE: return "MS SQL Server XML"; } return "unknown"; } static const char *S_SqlCTypeToString (SWORD sqltype) { static char s_buf[100]; #define s_c(x) case x: return #x switch(sqltype) { s_c(SQL_C_CHAR); s_c(SQL_C_LONG); s_c(SQL_C_SLONG); s_c(SQL_C_ULONG); s_c(SQL_C_WCHAR); s_c(SQL_C_BIT); s_c(SQL_C_TINYINT); s_c(SQL_C_STINYINT); s_c(SQL_C_UTINYINT); s_c(SQL_C_SHORT); s_c(SQL_C_SSHORT); s_c(SQL_C_USHORT); s_c(SQL_C_NUMERIC); s_c(SQL_C_DEFAULT); s_c(SQL_C_SBIGINT); s_c(SQL_C_UBIGINT); /* s_c(SQL_C_BOOKMARK); duplicate case */ s_c(SQL_C_GUID); s_c(SQL_C_FLOAT); s_c(SQL_C_DOUBLE); s_c(SQL_C_BINARY); /* s_c(SQL_C_VARBOOKMARK); duplicate case */ s_c(SQL_C_DATE); s_c(SQL_C_TIME); s_c(SQL_C_TIMESTAMP); s_c(SQL_C_TYPE_DATE); s_c(SQL_C_TYPE_TIME); s_c(SQL_C_TYPE_TIMESTAMP); } #undef s_c my_snprintf(s_buf, sizeof(s_buf), "(CType %d)", sqltype); return s_buf; } /* * describes the output variables of a query, * allocates buffers for result rows, * and binds this buffers to the statement. 
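 * Note: only the column metadata and buffer-size arithmetic happens here;
 * the buffer allocation and SQLBindCol calls are deferred to
 * bind_columns(), which dbd_st_fetch runs the first time a row is fetched.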
*/ int dbd_describe(SV *sth, imp_sth_t *imp_sth, int more) { dTHX; SQLRETURN rc; /* ODBC fn return value */ SQLSMALLINT column_n; /* column we are describing */ imp_fbh_t *fbh; SQLLEN colbuf_bytes_reqd = 0; SQLSMALLINT num_fields; /* number resultant columns */ SQLCHAR *cur_col_name; struct imp_dbh_st *imp_dbh = NULL; imp_dbh = (struct imp_dbh_st *)(DBIc_PARENT_COM(imp_sth)); if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) TRACE1(imp_sth, " dbd_describe done_desc=%d\n", imp_sth->done_desc); if (imp_sth->done_desc) return 1; /* success, already done it */ imp_sth->done_bind = 0; /* Find out how many columns there are in the result-set */ if (!SQL_SUCCEEDED(rc = SQLNumResultCols(imp_sth->hstmt, &num_fields))) { dbd_error(sth, rc, "dbd_describe/SQLNumResultCols"); return 0; } else if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) { TRACE2(imp_sth, " dbd_describe SQLNumResultCols=%d (columns=%d)\n", rc, num_fields); } /* * If "more" is not set (set in dbd_st_fetch) and SQLMoreResults is * supported then we skip over non-result-set generating statements. */ imp_sth->done_desc = 1; /* assume ok from here on */ if (!more) { while (num_fields == 0 && imp_dbh->odbc_sqlmoreresults_supported == 1) { rc = SQLMoreResults(imp_sth->hstmt); if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 8)) TRACE1(imp_sth, " Numfields = 0, SQLMoreResults = %d\n", rc); if (rc == SQL_NO_DATA) { /* mo more results */ imp_sth->moreResults = 0; break; } else if (rc == SQL_SUCCESS_WITH_INFO) { /* warn about an info returns */ dbd_error(sth, rc, "dbd_describe/SQLMoreResults"); } else if (!SQL_SUCCEEDED(rc)) { dbd_error(sth, rc, "dbd_describe/SQLMoreResults"); return 0; } /* reset describe flags, so that we re-describe */ imp_sth->done_desc = 0; /* force future executes to rebind automatically */ imp_sth->odbc_force_rebind = 1; if (!SQL_SUCCEEDED( rc = SQLNumResultCols(imp_sth->hstmt, &num_fields))) { dbd_error(sth, rc, "dbd_describe/SQLNumResultCols"); return 0; } else if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 8)) { TRACE1(imp_dbh, " num fields after MoreResults = %d\n", num_fields); } } /* end of SQLMoreResults */ } /* end of more */ DBIc_NUM_FIELDS(imp_sth) = num_fields; if (0 == num_fields) { if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) TRACE0(imp_dbh, " dbd_describe skipped (no resultant cols)\n"); imp_sth->done_desc = 1; return 1; } DBIc_ACTIVE_on(imp_sth); /*HERE*/ /* allocate field buffers */ Newz(42, imp_sth->fbh, num_fields, imp_fbh_t); /* the +255 below instead is due to an old comment in this code before this change claiming foxpro wrote off end of memory (and so 255 bytes were added - kept here without evidence) */ Newz(42, imp_sth->ColNames, (num_fields + 1) * imp_dbh->max_column_name_len + 255, UCHAR); cur_col_name = imp_sth->ColNames; /* Pass 1: Get space needed for field names, display buffer and dbuf */ for (fbh=imp_sth->fbh, column_n=0; column_n < num_fields; column_n++, fbh++) { fbh->imp_sth = imp_sth; #ifdef WITH_UNICODE rc = SQLDescribeColW(imp_sth->hstmt, (SQLSMALLINT)(column_n + 1), (SQLWCHAR *) cur_col_name, (SQLSMALLINT)imp_dbh->max_column_name_len, &fbh->ColNameLen, &fbh->ColSqlType, &fbh->ColDef, &fbh->ColScale, &fbh->ColNullable); #else /* WITH_UNICODE */ rc = SQLDescribeCol(imp_sth->hstmt, (SQLSMALLINT)(column_n + 1), cur_col_name, (SQLSMALLINT)imp_dbh->max_column_name_len, &fbh->ColNameLen, &fbh->ColSqlType, /* column size or precision depending on type */ &fbh->ColDef, &fbh->ColScale, /* decimal digits */ &fbh->ColNullable); #endif /* WITH_UNICODE */ if (!SQL_SUCCEEDED(rc)) { /* should never fail */ dbd_error(sth, rc, 
"describe/SQLDescribeCol"); break; } fbh->ColName = cur_col_name; #ifdef WITH_UNICODE cur_col_name += fbh->ColNameLen * sizeof(SQLWCHAR); #else cur_col_name += fbh->ColNameLen + 1; cur_col_name[fbh->ColNameLen] = '\0'; /* should not be necessary */ #endif if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 8)) /* TO_DO the following print for column name won't work well for UCS2 strings when SQLDescribeW above called */ PerlIO_printf(DBIc_LOGPIO(imp_dbh), " DescribeCol column = %d, name = %s, " "namelen = %d, type = %s(%d), " "precision/column size = %ld, scale = %d, " "nullable = %d\n", column_n + 1, fbh->ColName, fbh->ColNameLen, S_SqlTypeToString(fbh->ColSqlType), fbh->ColSqlType, fbh->ColDef, fbh->ColScale, fbh->ColNullable); #ifdef SQL_DESC_DISPLAY_SIZE rc = SQLColAttribute(imp_sth->hstmt, (SQLSMALLINT)(column_n + 1), SQL_DESC_DISPLAY_SIZE, NULL, 0, NULL , &fbh->ColDisplaySize); if (!SQL_SUCCEEDED(rc)) { /* Some ODBC drivers don't support SQL_COLUMN_DISPLAY_SIZE on some result-sets. e.g., The "Infor Integration ODBC driver" cannot handle SQL_COLUMN_DISPLAY_SIZE and SQL_COLUMN_LENGTH for SQLTables and SQLColumns calls. We used to fail here but there is a prescident not to as this code is already in an ifdef for drivers that do not define SQL_COLUMN_DISPLAY_SIZE. Since just about everyone will be using an ODBC driver manager now it is unlikely these attributes will not be defined so we default if the call fails now */ if( DBIc_TRACE(imp_sth, DBD_TRACING, 0, 8) ) { TRACE0(imp_sth, " describe/SQLColAttributes/SQL_COLUMN_DISPLAY_SIZE " "not supported, will be equal to SQL_COLUMN_LENGTH\n"); } /* ColDisplaySize will be made equal to ColLength */ fbh->ColDisplaySize = 0; rc = SQL_SUCCESS; } else if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 8)) { TRACE1(imp_sth, " SQL_COLUMN_DISPLAY_SIZE = %ld\n", (long)fbh->ColDisplaySize); } /* TBD: should we only add a terminator if it's a char??? */ fbh->ColDisplaySize += 1; /* add terminator */ #else /* !SQL_COLUMN_DISPLAY_SIZE */ fbh->ColDisplaySize = imp_sth->odbc_column_display_size; #endif /* SQL_COLUMN_DISPLAY_SIZE */ /* Workaround bug in Firebird driver that reports timestamps are display size 24 when in fact it can return the longer e.g., 1998-05-15 00:01:00.100000000 */ if ((imp_dbh->driver_type == DT_FIREBIRD) && (fbh->ColSqlType == SQL_TYPE_TIMESTAMP)) { fbh->ColDisplaySize = 30; } /* For MS Access SQL_COLUMN_DISPLAY_SIZE is 22 for doubles and it differs from SQLDescribeCol which says 53 - use the latter or some long numbers get squished. Doesn't seem to fix accdb driver. See rt 69864. 
*/ if ((imp_dbh->driver_type == DT_MS_ACCESS_JET) && (fbh->ColSqlType == SQL_DOUBLE)) { fbh->ColDisplaySize = fbh->ColDef + 1; } #ifdef SQL_DESC_LENGTH rc = SQLColAttribute(imp_sth->hstmt,(SQLSMALLINT)(column_n + 1), SQL_DESC_LENGTH, NULL, 0, NULL ,&fbh->ColLength); if (!SQL_SUCCEEDED(rc)) { /* See comment above under SQL_COLUMN_DISPLAY_SIZE */ fbh->ColLength = imp_sth->odbc_column_display_size; if( DBIc_TRACE(imp_sth, DBD_TRACING, 0, 8) ) { TRACE1(imp_sth, " describe/SQLColAttributes/SQL_COLUMN_LENGTH not " "supported, fallback on %ld\n", (long)fbh->ColLength); } rc = SQL_SUCCESS; } else if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 8)) { TRACE1(imp_sth, " SQL_COLUMN_LENGTH = %ld\n", (long)fbh->ColLength); } # if defined(WITH_UNICODE) fbh->ColLength += 1; /* add extra byte for double nul terminator */ switch(fbh->ColSqlType) { case SQL_CHAR: fbh->ColSqlType = SQL_WCHAR; break; case SQL_VARCHAR: fbh->ColSqlType = SQL_WVARCHAR; break; case SQL_LONGVARCHAR: fbh->ColSqlType = SQL_WLONGVARCHAR; break; } # endif #else /* !SQL_COLUMN_LENGTH */ fbh->ColLength = imp_sth->odbc_column_display_size; #endif /* SQL_COLUMN_LENGTH */ /* may want to ensure Display Size at least as large as column * length -- workaround for some drivers which report a shorter * display length * */ fbh->ColDisplaySize = fbh->ColDisplaySize > fbh->ColLength ? fbh->ColDisplaySize : fbh->ColLength; /* * change fetched size, decimal digits etc for some types, * The tests for ColDef = 0 are for when the driver does not give * us a length for the column e.g., "max" column types in SQL Server * like varbinary(max). */ fbh->ftype = SQL_C_CHAR; switch(fbh->ColSqlType) { case SQL_VARBINARY: case SQL_BINARY: fbh->ftype = SQL_C_BINARY; if (fbh->ColDef == 0) { /* cope with varbinary(max) */ fbh->ColDisplaySize = DBIc_LongReadLen(imp_sth); } break; #if defined(WITH_UNICODE) case SQL_WCHAR: case SQL_WVARCHAR: fbh->ftype = SQL_C_WCHAR; /* MS SQL returns bytes, Oracle returns characters ... */ if (fbh->ColDef == 0) { /* cope with nvarchar(max) */ fbh->ColDisplaySize = DBIc_LongReadLen(imp_sth); fbh->ColLength = DBIc_LongReadLen(imp_sth); } else if (fbh->ColDef > 2147483590) { /* * The new MS Access driver ACEODBC.DLL cannot cope with the * 40UnicodeRoundTrip test which contains a * select ?, LEN(?) * returning a massive number for the column display size of * the first column. This leads to a memory allocation error * unless we trap it as a large column. */ fbh->ColDisplaySize = DBIc_LongReadLen(imp_sth); fbh->ColLength = DBIc_LongReadLen(imp_sth); } fbh->ColDisplaySize *= sizeof(SQLWCHAR); fbh->ColLength *= sizeof(SQLWCHAR); break; #else /* WITH_UNICODE */ # if defined(SQL_WCHAR) case SQL_WCHAR: if (fbh->ColDef == 0) { fbh->ColDisplaySize = DBIc_LongReadLen(imp_sth); } break; # endif # if defined(SQL_WVARCHAR) case SQL_WVARCHAR: if (fbh->ColDef == 0) { fbh->ColDisplaySize = DBIc_LongReadLen(imp_sth); } break; # endif #endif /* WITH_UNICODE */ case SQL_LONGVARBINARY: fbh->ftype = SQL_C_BINARY; fbh->ColDisplaySize = DBIc_LongReadLen(imp_sth); break; #ifdef SQL_WLONGVARCHAR case SQL_WLONGVARCHAR: /* added for SQLServer 7 ntext type */ # if defined(WITH_UNICODE) fbh->ftype = SQL_C_WCHAR; /* MS SQL returns bytes, Oracle returns characters ... 
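       so size for the worst case by scaling the length by sizeof(SQLWCHAR)
       just below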
*/ fbh->ColLength *= sizeof(SQLWCHAR); fbh->ColDisplaySize = DBIc_LongReadLen(imp_sth) + 1; # else /* !WITH_UNICODE */ fbh->ColDisplaySize = DBIc_LongReadLen(imp_sth) + 1; # endif /* WITH_UNICODE */ break; #endif /* SQL_WLONGVARCHAR */ case SQL_VARCHAR: if (fbh->ColDef == 0) { fbh->ColDisplaySize = DBIc_LongReadLen(imp_sth) + 1; } break; case SQL_LONGVARCHAR: fbh->ColDisplaySize = DBIc_LongReadLen(imp_sth) + 1; break; case MS_SQLS_XML_TYPE: { /* XML columns are inherently Unicode so bind them as such and in this case double the size of LongReadLen as we count LongReadLen as characters not bytes */ #ifdef WITH_UNICODE fbh->ftype = SQL_C_WCHAR; fbh->ColDisplaySize = DBIc_LongReadLen(imp_sth) * sizeof(SQLWCHAR) + sizeof(SQLWCHAR); #else fbh->ColDisplaySize = DBIc_LongReadLen(imp_sth) + 1; #endif break; } #ifdef TIMESTAMP_STRUCT /* XXX! */ case SQL_TIMESTAMP: case SQL_TYPE_TIMESTAMP: fbh->ftype = SQL_C_TIMESTAMP; fbh->ColDisplaySize = sizeof(TIMESTAMP_STRUCT); break; #endif case SQL_INTEGER: fbh->ftype = SQL_C_LONG; fbh->ColDisplaySize = sizeof(SQLINTEGER); break; } colbuf_bytes_reqd += fbh->ColDisplaySize; /* * We later align columns in the buffer on integer boundaries so we * we need to take account of this here. The last % is to avoid adding * sizeof(int) if we are already aligned. */ colbuf_bytes_reqd += (sizeof(int) - (colbuf_bytes_reqd % sizeof(int))) % sizeof(int); if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) PerlIO_printf(DBIc_LOGPIO(imp_dbh), " now using col %d: type = %s (%d), len = %ld, " "display size = %ld, prec = %ld, scale = %lu\n", column_n + 1, S_SqlTypeToString(fbh->ColSqlType), fbh->ColSqlType, (long)fbh->ColLength, (long)fbh->ColDisplaySize, (long)fbh->ColDef, (unsigned long)fbh->ColScale); } if (!SQL_SUCCEEDED(rc)) { /* dbd_error called above */ if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 5)) TRACE0(imp_sth, "Freeing fbh\n"); Safefree(imp_sth->fbh); imp_sth->fbh = NULL; return 0; } imp_sth->RowBufferSizeReqd = colbuf_bytes_reqd; if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) TRACE1(imp_sth, " -dbd_describe done_bind=%d\n", imp_sth->done_bind); return 1; } static SQLRETURN bind_columns( SV *h, imp_sth_t *imp_sth) { SQLSMALLINT num_fields; UCHAR *rbuf_ptr; imp_fbh_t *fbh; SQLRETURN rc = SQL_SUCCESS; /* ODBC fn return value */ SQLSMALLINT i; num_fields = DBIc_NUM_FIELDS(imp_sth); if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) TRACE2(imp_sth, " bind_columns fbh=%p fields=%d\n", imp_sth->fbh, num_fields); /* allocate Row memory */ Newz(42, imp_sth->RowBuffer, imp_sth->RowBufferSizeReqd + num_fields, UCHAR); rbuf_ptr = imp_sth->RowBuffer; for(i=0, fbh = imp_sth->fbh; i < num_fields && SQL_SUCCEEDED(rc); i++, fbh++) { if (!(fbh->bind_flags & ODBC_TREAT_AS_LOB)) { fbh->data = rbuf_ptr; rbuf_ptr += fbh->ColDisplaySize; /* alignment -- always pad so the next column is aligned on a word boundary */ rbuf_ptr += (sizeof(int) - ((rbuf_ptr - imp_sth->RowBuffer) % sizeof(int))) % sizeof(int); /* Bind output column variables */ if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) PerlIO_printf(DBIc_LOGPIO(imp_sth), " Bind %d: type = %s(%d), buf=%p, buflen=%ld\n", i+1, S_SqlTypeToString(fbh->ftype), fbh->ftype, fbh->data, fbh->ColDisplaySize); rc = SQLBindCol(imp_sth->hstmt, (SQLSMALLINT)(i+1), fbh->ftype, fbh->data, fbh->ColDisplaySize, &fbh->datalen); if (!SQL_SUCCEEDED(rc)) { dbd_error(h, rc, "describe/SQLBindCol"); break; } /* Save the fact this column is now bound and hence the type can not be changed */ fbh->bound = 1; } else if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) { TRACE1(imp_sth, " TreatAsLOB 
bind_flags = %lx\n", fbh->bind_flags); } } imp_sth->done_bind = 1; if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) TRACE1(imp_sth, " bind_columns=%d\n", rc); return rc; } /*======================================================================*/ /* */ /* dbd_st_execute */ /* ============== */ /* */ /* returns: */ /* -2 - error */ /* >=0 - ok, row count */ /* -1 - unknown count */ /* */ /*======================================================================*/ int dbd_st_execute( SV *sth, imp_sth_t *imp_sth) { dTHX; IV ret; if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 3)) TRACE1(imp_sth, " +dbd_st_execute(%p)\n", sth); ret = dbd_st_execute_iv(sth, imp_sth); if (ret > INT_MAX) { if (DBIc_WARN(imp_sth)) { warn("SQLRowCount overflowed in execute - see RT 81911 - you need to upgrade your DBI to at least 1.633_92"); } ret = INT_MAX; } if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 3)) TRACE2(imp_sth, " -dbd_st_execute(%p)=%"IVdf"\n", sth, ret); return (int)ret; } IV dbd_st_execute_iv( SV *sth, imp_sth_t *imp_sth) { dTHX; RETCODE rc; D_imp_dbh_from_sth; int outparams = 0; SQLLEN ret; if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 3)) TRACE1(imp_dbh, " +dbd_st_execute_iv(%p)\n", sth); if (SQL_NULL_HDBC == imp_dbh->hdbc) { DBIh_SET_ERR_CHAR(sth, (imp_xxh_t*)imp_sth, Nullch, 1, "Database handle has been disconnected", Nullch, Nullch); return -2; } /* * if the handle is active, we need to finish it here. * Note that dbd_st_finish already checks to see if it's active. */ dbd_st_finish(sth, imp_sth);; /* * bind_param_inout support */ outparams = (imp_sth->out_params_av) ? AvFILL(imp_sth->out_params_av)+1 : 0; if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) { TRACE1(imp_dbh, " outparams = %d\n", outparams); } if (imp_dbh->odbc_defer_binding) { rc = SQLFreeStmt(imp_sth->hstmt, SQL_RESET_PARAMS); /* check bind input parameters */ if (imp_sth->all_params_hv) { HV *hv = imp_sth->all_params_hv; SV *sv; char *key; I32 retlen; hv_iterinit(hv); while( (sv = hv_iternextsv(hv, &key, &retlen)) != NULL ) { if (sv != &PL_sv_undef) { phs_t *phs = (phs_t*)(void*)SvPVX(sv); if (!rebind_param(aTHX_ sth, imp_sth, imp_dbh, phs)) return -2; if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 8)) { if (SvOK(phs->sv) && (phs->value_type == SQL_C_CHAR)) { char sbuf[256]; unsigned int i = 0; while((phs->sv_buf[i] != 0) && (i < (sizeof(sbuf) - 6))) { sbuf[i] = phs->sv_buf[i]; i++; } strcpy(&sbuf[i], "..."); TRACE2(imp_dbh, " rebind check char Param %d (%s)\n", phs->idx, sbuf); } } } } } } if (outparams) { /* check validity of bind_param_inout SV's */ int i = outparams; while(--i >= 0) { phs_t *phs = (phs_t*)(void*)SvPVX(AvARRAY(imp_sth->out_params_av)[i]); /* Make sure we have the value in string format. Typically a number */ /* will be converted back into a string using the same bound buffer */ /* so the sv_buf test below will not trip. */ /* mutation check */ if (SvTYPE(phs->sv) != phs->sv_type /* has the type changed? */ || (SvOK(phs->sv) && !SvPOK(phs->sv)) /* is there still a string? */ || (SvPVX(phs->sv) != phs->sv_buf) /* has the string buffer moved? 
*/ || (SvOK(phs->sv) != phs->svok) ) { if (!rebind_param(aTHX_ sth, imp_sth, imp_dbh, phs)) croak("Can't rebind placeholder %s", phs->name); } else { /* no mutation found */ } } } if (imp_sth->odbc_exec_direct) { /* statement ready for SQLExecDirect */ if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 5)) { TRACE0(imp_dbh, " odbc_exec_direct=1, using SQLExecDirect\n"); } rc = SQLExecDirect(imp_sth->hstmt, imp_sth->statement, SQL_NTS); } else { rc = SQLExecute(imp_sth->hstmt); } if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 8)) TRACE2(imp_dbh, " SQLExecute/SQLExecDirect(%p)=%d\n", imp_sth->hstmt, rc); /* * If asynchronous execution has been enabled, SQLExecute will * return SQL_STILL_EXECUTING until it has finished. * Grab whatever messages occur during execution... */ while (rc == SQL_STILL_EXECUTING){ dbd_error(sth, rc, "st_execute/SQLExecute"); /* * Wait a second so we don't loop too fast and bring the machine * to its knees */ if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 5)) TRACE1(imp_dbh, " SQLExecute(%p) still executing", imp_sth->hstmt); sleep(1); rc = SQLExecute(imp_sth->hstmt); } /* patches to handle blobs better, via Jochen Wiedmann */ while (rc == SQL_NEED_DATA) { phs_t* phs; STRLEN len; UCHAR* ptr; if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 5)) TRACE1(imp_dbh, " NEED DATA %p\n", imp_sth->hstmt); while ((rc = SQLParamData(imp_sth->hstmt, (PTR*) &phs)) == SQL_STILL_EXECUTING) { if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 5)) TRACE1(imp_dbh, " SQLParamData(%p) still executing", imp_sth->hstmt); /* * wait a while to avoid looping too fast waiting for SQLParamData * to complete. */ sleep(1); } if (rc != SQL_NEED_DATA) { if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 5)) TRACE1(imp_dbh, " SQLParamData=%d\n", rc); break; } /* phs->sv is already upgraded to a PV in rebind_param. * It is not NULL, because we otherwise won't be called here * (value_len = 0). */ if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 5)) TRACE2(imp_dbh, " SQLParamData needs phs %p, sending %"UVuf" bytes\n", phs, (UV)len); ptr = SvPV(phs->sv, len); rc = SQLPutData(imp_sth->hstmt, ptr, len); if (!SQL_SUCCEEDED(rc)) { if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 5)) TRACE1(imp_dbh, " SQLPutData=%d\n", rc); break; } rc = SQL_NEED_DATA; /* So the loop continues ... */ } /* * Call dbd_error if we get SQL_SUCCESS_WITH_INFO as there may * be some status msgs for us. */ if (SQL_SUCCESS_WITH_INFO == rc) { dbd_error(sth, rc, "st_execute/SQLExecute"); } if (!SQL_SUCCEEDED(rc) && rc != SQL_NO_DATA) { dbd_error(sth, rc, "st_execute/SQLExecute"); if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 3)) TRACE1(imp_dbh, " -dbd_st_execute_iv(%p)=-2\n", sth); return -2; } /* * If SQLExecute executes a searched update, insert, or delete statement * that does not affect any rows at the data source, the call to * SQLExecute returns SQL_NO_DATA. */ if (rc != SQL_NO_DATA) { /* SWORD num_fields; */ RETCODE rc2; rc2 = SQLRowCount(imp_sth->hstmt, &imp_sth->RowCount); if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 7)) TRACE2(imp_dbh, " SQLRowCount=%d (rows=%"IVdf")\n", rc2, (IV)(SQL_SUCCEEDED(rc2) ? imp_sth->RowCount : -1)); if (!SQL_SUCCEEDED(rc2)) { dbd_error(sth, rc2, "st_execute/SQLRowCount"); /* XXX ? */ imp_sth->RowCount = -1; DBIc_ROW_COUNT(imp_sth) = -1; } else { DBIc_ROW_COUNT(imp_sth) = imp_sth->RowCount; } /* sanity check for strange circumstances and multiple types of * result sets. Crazy that it can happen, but it can with * multiple result sets and stored procedures which return * result sets. * This seems to slow things down a bit and is rarely needed. 
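     * (odbc_force_rebind simply makes the next execute run dbd_describe
     * again, so the column bindings follow a changed result-set shape.)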
* * This can happen in Sql Server in strange cases where stored * procs have multiple result sets. Sometimes, if there is an * select then an insert, etc. Maybe this should be a special * attribute to force a re-describe after every execute? */ if (imp_sth->odbc_force_rebind) { /* force calling dbd_describe after each execute */ odbc_clear_result_set(sth, imp_sth); } } else { /* SQL_NO_DATA returned, must have no rows :) */ /* seem to need to reset the done_desc, but not sure if this is * what we want yet */ if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 7)) TRACE0(imp_dbh, " SQL_NO_DATA...resetting done_desc!\n"); imp_sth->done_desc = 0; imp_sth->RowCount = 0; DBIc_ROW_COUNT(imp_sth) = 0; /* Strictly speaking a driver should only return SQL_NO_DATA when a searched insert/update/delete affects no rows and so it is pointless continuing below and calling SQLNumResultCols. However, if you run a procedure such as this: CREATE PROCEDURE PERL_DBD_PROC1 (\@i INT) AS DECLARE \@result INT; BEGIN SET \@result = \@i; IF (\@i = 99) BEGIN UPDATE PERL_DBD_TABLE1 SET i=\@i; SET \@result = \@i + 1; END; SELECT \@result; END to MS SQL Server, it will return SQL_NO_DATA but then SQLNumResultCols will be successful and return 1 column for the result set. As a result, we need to continue below. Some versions of freeTDS will return SQLNumResultCols = 1 after a "delete from table" but then give a function sequence error when SQLDescribeCol called. It would have been handy to return 0 here to workaround that bug but the above does not allow us to. */ } /* * MS SQL Server is very picky wrt to completing a procedure i.e., * it says the output bound parameters are not available until the * procedure is complete and the procedure is not complete until you * have called SQLMoreResults and it has returned SQL_NO_DATA. So, if you * call a procedure multiple times in the same statement (e.g., by just * calling execute) DBD::ODBC will call dbd_describe to describe the first * execute, discover there is no result-set and call SQLMoreResults - ok, * but after that, the dbd_describe is done and SQLMoreResults will not * get called. The following is a kludge to get around this until * a) DBD::ODBC can be changed to stop skipping over non-result-set * generating statements and b) the SQLMoreResults calls move out of * dbd_describe. */ { SQLSMALLINT flds = 0; SQLRETURN sts; if (!SQL_SUCCEEDED(sts = SQLNumResultCols(imp_sth->hstmt, &flds))) { dbd_error(sth, sts, "dbd_describe/SQLNumResultCols"); return -2; } if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) TRACE1(imp_dbh, " SQLNumResultCols=0 (flds=%d)\n", flds); if (flds == 0) { /* not a result-set */ if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) TRACE2(imp_dbh, " Not a result-set nflds=(%d,%d), resetting done_desc\n", flds, DBIc_NUM_FIELDS(imp_sth)); imp_sth->done_desc = 0; } } if (!imp_sth->done_desc) { /* This needs to be done after SQLExecute for some drivers! */ /* Especially for order by and join queries. */ /* See Microsoft Knowledge Base article (#Q124899) */ /* describe and allocate storage for results (if any needed) */ if (!dbd_describe(sth, imp_sth, 0)) { if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 3)) { TRACE0(imp_sth, " !!dbd_describe failed, dbd_st_execute_iv #1...!\n"); } if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 3)) TRACE1(imp_dbh, " -dbd_st_execute_iv(%p)=-2\n", sth); return -2; /* dbd_describe already called dbd_error() */ } } if (DBIc_NUM_FIELDS(imp_sth) > 0) { DBIc_ACTIVE_on(imp_sth); /* only set for select (?) 
*/ if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) { TRACE1(imp_sth, " have %d fields\n", DBIc_NUM_FIELDS(imp_sth)); } } else { if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) { TRACE0(imp_dbh, " got no rows: resetting ACTIVE, moreResults\n"); } imp_sth->moreResults = 0; /* flag that we've done the describe to avoid a problem * where calling describe after execute returned no rows * caused SQLServer to provide a description of a query * that didn't quite apply. */ /* imp_sth->done_desc = 1; */ DBIc_ACTIVE_off(imp_sth); } if (outparams) { /* check validity of bound output SV's */ odbc_handle_outparams(aTHX_ imp_sth, DBIc_TRACE_LEVEL(imp_sth)); } /* * JLU: Jon Smirl had: * return (imp_sth->RowCount == -1 ? -1 : abs(imp_sth->RowCount)); * why? Why do you need the abs() of the rowcount? Special reason? * The e-mail that accompanied the change indicated that Sybase would return * a negative value for an estimate. Wouldn't you WANT that to stay * negative? * * dgood: JLU had: * return imp_sth->RowCount; * Because you return -2 on errors so if you don't abs() it, a perfectly * valid return value will get flagged as an error... */ ret = (imp_sth->RowCount == -1 ? -1 : imp_sth->RowCount); /* TO_DO NONESENSE IT IS NOOP */ if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 3)) TRACE2(imp_dbh, " -dbd_st_execute_iv(%p)=%"IVdf"\n", sth, ret); return ret; } /*---------------------------------------- * running $sth->fetch() *---------------------------------------- */ AV *dbd_st_fetch(SV *sth, imp_sth_t *imp_sth) { dTHX; D_imp_dbh_from_sth; int i; AV *av; RETCODE rc; int num_fields; #ifdef TIMESTAMP_STRUCT /* iODBC doesn't define this */ char cvbuf[512]; #endif int ChopBlanks; /* Check that execute() was executed sucessfully. This also implies */ /* that dbd_describe() executed sucessfuly so the memory buffers */ /* are allocated and bound. */ if ( !DBIc_ACTIVE(imp_sth) ) { /*dbd_error(sth, DBDODBC_INTERNAL_ERROR, "no select statement currently executing");*/ /* The following issues a warning (instead of the error above) when a selectall_* did not actually return a result-set e.g., if someone passed a create table to selectall_*. There is some debate as to what should happen here. See http://www.nntp.perl.org/group/perl.dbi.dev/2011/06/msg6606.html and rt 68720 and rt_68720.t */ DBIh_SET_ERR_CHAR(sth, (imp_xxh_t*)imp_sth, "0", 0, "no select statement currently executing", "", "fetch"); return Nullav; } if (!imp_sth->done_bind) { rc = bind_columns(sth, imp_sth); if (!SQL_SUCCEEDED(rc)) { Safefree(imp_sth->fbh); imp_sth->fbh = NULL; dbd_st_finish(sth, imp_sth); return Nullav; } } rc = SQLFetch(imp_sth->hstmt); if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) TRACE1(imp_dbh, " SQLFetch=%d\n", rc); if (!SQL_SUCCEEDED(rc)) { if (SQL_NO_DATA_FOUND == rc) { if (imp_dbh->odbc_sqlmoreresults_supported == 1) { rc = SQLMoreResults(imp_sth->hstmt); /* Check for multiple results */ if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 6)) TRACE1(imp_dbh, " Getting more results: %d\n", rc); if (rc == SQL_SUCCESS_WITH_INFO) { dbd_error(sth, rc, "st_fetch/SQLMoreResults"); /* imp_sth->moreResults = 0; */ } if (SQL_SUCCEEDED(rc)){ /* More results detected. Clear out the old result */ /* stuff and re-describe the fields. */ if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 3)) { TRACE0(imp_dbh, " MORE Results!\n"); } odbc_clear_result_set(sth, imp_sth); /* force future executes to rebind automatically */ imp_sth->odbc_force_rebind = 1; /* tell the odbc driver that we need to unbind the * bound columns. 
Fix bug for 0.35 (2/8/02) */ rc = SQLFreeStmt(imp_sth->hstmt, SQL_UNBIND); if (!SQL_SUCCEEDED(rc)) { AllODBCErrors(imp_dbh->henv, imp_dbh->hdbc, 0, DBIc_TRACE(imp_sth, DBD_TRACING, 0, 3), DBIc_LOGPIO(imp_dbh)); } if (!dbd_describe(sth, imp_sth, 1)) { if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 3)) TRACE0(imp_dbh, " !!MORE Results dbd_describe failed...!\n"); return Nullav; /* dbd_describe already called dbd_error() */ } if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) { TRACE0(imp_dbh, " MORE Results dbd_describe success...!\n"); } /* set moreResults so we'll know we can keep fetching */ imp_sth->moreResults = 1; imp_sth->done_desc = 0; return Nullav; } else if (rc == SQL_NO_DATA_FOUND || rc == SQL_NO_DATA || rc == SQL_SUCCESS_WITH_INFO){ /* No more results */ /* need to check output params here... */ int outparams = (imp_sth->out_params_av) ? AvFILL(imp_sth->out_params_av)+1 : 0; if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 6)) { TRACE1(imp_sth, " No more results -- outparams = %d\n", outparams); } imp_sth->moreResults = 0; imp_sth->done_desc = 1; if (outparams) { odbc_handle_outparams(aTHX_ imp_sth, DBIc_TRACE_LEVEL(imp_sth)); } /* XXX need to 'finish' here */ dbd_st_finish(sth, imp_sth); return Nullav; } else { dbd_error(sth, rc, "st_fetch/SQLMoreResults"); } } else { /* * SQLMoreResults not supported, just finish. * per bug found by Jarkko Hyty [hyoty@medialab.sonera.fi] * No more results */ imp_sth->moreResults = 0; /* XXX need to 'finish' here */ /*dbd_st_finish(sth, imp_sth);*/ return Nullav; } } else { dbd_error(sth, rc, "st_fetch/SQLFetch"); /* XXX need to 'finish' here */ /* MJE commented out the following in 1.34_3 as it prevents calling odbc_get dbd_st_finish(sth, imp_sth);*/ return Nullav; } } if (imp_sth->RowCount == -1) imp_sth->RowCount = 0; imp_sth->RowCount++; av = DBIc_DBISTATE(imp_sth)->get_fbav(imp_sth); num_fields = AvFILL(av)+1; if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) TRACE1(imp_dbh, " fetch num_fields=%d\n", num_fields); ChopBlanks = DBIc_has(imp_sth, DBIcf_ChopBlanks); for(i=0; i < num_fields; ++i) { imp_fbh_t *fbh = &imp_sth->fbh[i]; SV *sv = AvARRAY(av)[i]; /* Note: we (re)use the SV in the AV */ if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) PerlIO_printf( DBIc_LOGPIO(imp_dbh), " fetch col#%d %s datalen=%ld displ=%lu\n", i+1, fbh->ColName, (long)fbh->datalen, (unsigned long)fbh->ColDisplaySize); if (fbh->datalen == SQL_NULL_DATA) { /* NULL value */ SvOK_off(sv); continue; } if (fbh->datalen > fbh->ColDisplaySize || fbh->datalen < 0) { /* truncated LONG ??? DBIcf_LongTruncOk() */ /* DBIcf_LongTruncOk this should only apply to LONG type fields */ /* truncation of other fields should always be an error since it's */ /* a sign of an internal error */ if (!DBIc_has(imp_sth, DBIcf_LongTruncOk) /* && rc == SQL_SUCCESS_WITH_INFO */) { /* * Since we've detected the problem locally via the datalen, * we don't need to worry about the value of rc. * * This used to make sure rc was set to SQL_SUCCESS_WITH_INFO * but since it's an error and not SUCCESS, call dbd_error() * with SQL_ERROR explicitly instead. */ #ifdef COULD_DO_THIS DBIh_SET_ERR_CHAR( sth, (imp_xxh_t*)imp_sth, Nullch, 1, "st_fetch/SQLFetch (long truncated DBI attribute LongTruncOk " "not set and/or LongReadLen too small)", Nullch, Nullch); #endif dbd_error( sth, DBDODBC_INTERNAL_ERROR, "st_fetch/SQLFetch (long truncated DBI attribute LongTruncOk " "not set and/or LongReadLen too small)"); return Nullav; } /* LongTruncOk true, just ensure perl has the right length * for the truncated data. 
*/ sv_setpvn(sv, (char*)fbh->data, fbh->ColDisplaySize); } else { switch(fbh->ftype) { #ifdef TIMESTAMP_STRUCT /* iODBC doesn't define this */ case SQL_C_TIMESTAMP: case SQL_C_TYPE_TIMESTAMP: { TIMESTAMP_STRUCT *ts; ts = (TIMESTAMP_STRUCT *)fbh->data; if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) TRACE0(imp_dbh, " adjusting timestamp\n"); my_snprintf(cvbuf, sizeof(cvbuf), "%04d-%02d-%02d %02d:%02d:%02d", ts->year, ts->month, ts->day, ts->hour, ts->minute, ts->second, ts->fraction); sv_setpv(sv, cvbuf); break; } #endif #if defined(WITH_UNICODE) case SQL_C_WCHAR: if (ChopBlanks && fbh->ColSqlType == SQL_WCHAR && fbh->datalen > 0) { SQLWCHAR *p = (SQLWCHAR*)fbh->data; SQLWCHAR blank = 0x20; SQLLEN orig_len = fbh->datalen; while(fbh->datalen && p[fbh->datalen/sizeof(SQLWCHAR)-1] == blank) { --fbh->datalen; } if (DBIc_TRACE(imp_sth, UNICODE_TRACING, 0, 0)) /* odbcunicode */ TRACE2(imp_sth, " Unicode ChopBlanks orig len=%ld, new len=%ld\n", orig_len, fbh->datalen); } sv_setwvn(aTHX_ sv, (SQLWCHAR*)fbh->data, fbh->datalen/sizeof(SQLWCHAR)); if (DBIc_TRACE(imp_sth, UNICODE_TRACING, 0, 0)) { /* odbcunicode */ /* unsigned char dlog[256]; */ /* unsigned char *src; */ /* char *dst = dlog; */ /* unsigned int n; */ /* STRLEN len; */ /* src = SvPV(sv, len); */ /* dst += sprintf(dst, "0x"); */ /* for (n = 0; (n < 126) && (n < len); n++, src++) { */ /* dst += sprintf(dst, "%2.2x", *src); */ /* } */ /*TRACE1(imp_sth, " SQL_C_WCHAR data = %s\n", dlog);*/ TRACE1(imp_sth, " SQL_C_WCHAR data = %.100s\n", neatsvpv(sv, 100)); } break; #endif /* WITH_UNICODE */ case SQL_INTEGER: sv_setiv(sv, *((SQLINTEGER *)fbh->data)); break; default: if (ChopBlanks && fbh->datalen > 0 && ((fbh->ColSqlType == SQL_CHAR) || (fbh->ColSqlType == SQL_WCHAR))) { char *p = (char*)fbh->data; if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 5)) TRACE0(imp_sth, " chopping blanks\n"); while(fbh->datalen && p[fbh->datalen - 1]==' ') --fbh->datalen; } sv_setpvn(sv, (char*)fbh->data, fbh->datalen); if (imp_sth->odbc_utf8_on && fbh->ftype != SQL_C_BINARY ) { if (DBIc_TRACE(imp_sth, UNICODE_TRACING, 0, 0)) /* odbcunicode */ TRACE0(imp_sth, " odbc_utf8 - decoding UTF-8"); #ifdef sv_utf8_decode sv_utf8_decode(sv); #else SvUTF8_on(sv); #endif } if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) TRACE2(imp_sth, " %s(%ld)\n", neatsvpv(sv, fbh->datalen+5), fbh->datalen); } } #if DBIXS_REVISION > 13590 /* If a bind type was specified we use DBI's sql_type_cast to cast it - currently only number types are handled */ if ( /*(fbh->req_type == SQL_INTEGER) || not needed as we've already done a sv_setiv*/ (fbh->req_type == SQL_NUMERIC) || (fbh->req_type == SQL_DECIMAL)) { int sts; char errstr[256]; if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) TRACE3(imp_sth, " sql_type_case %s %"IVdf" %lx\n", neatsvpv(sv, fbh->datalen+5), fbh->req_type, fbh->bind_flags); sts = DBIc_DBISTATE(imp_sth)->sql_type_cast_svpv( aTHX_ sv, fbh->req_type, (U32)fbh->bind_flags, NULL); if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) TRACE1(imp_sth, " sql_type_cast=%d\n", sts); if (sts == 0) { sprintf(errstr, "over/under flow converting column %d to type %"IVdf"", i+1, fbh->req_type); DBIh_SET_ERR_CHAR(sth, (imp_xxh_t*)imp_sth, Nullch, 1, errstr, Nullch, Nullch); return Nullav; } else if (sts == -2) { sprintf(errstr, "unsupported bind type %"IVdf" for column %d in sql_type_cast_svpv", fbh->req_type, i+1); DBIh_SET_ERR_CHAR(sth, (imp_xxh_t*)imp_sth, Nullch, 1, errstr, Nullch, Nullch); return Nullav; } } #endif /* DBIXS_REVISION > 13590 */ } /* end of loop through bound columns */ return av; } /* /\* SHOULD BE 
ABLE TO DELETE BOTH OF THESE NOW AND dbd_st_rows macro in dbdimp.h *\/ */ /* int dbd_st_rows(SV *sth, imp_sth_t *imp_sth) */ /* { */ /* return (int)imp_sth->RowCount; */ /* } */ /* IV dbd_st_rows(SV *sth, imp_sth_t *imp_sth) */ /* { */ /* return imp_sth->RowCount; */ /* } */ int dbd_st_finish(SV *sth, imp_sth_t *imp_sth) { dTHX; D_imp_dbh_from_sth; RETCODE rc; if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 3)) TRACE1(imp_sth, " dbd_st_finish(%p)\n", sth); /* Cancel further fetches from this cursor. */ /* We don't close the cursor till DESTROY (dbd_st_destroy). */ /* The application may re execute(...) it. */ /* XXX semantics of finish (eg oracle vs odbc) need lots more thought */ /* re-read latest DBI specs and ODBC manuals */ if (DBIc_ACTIVE(imp_sth) && imp_dbh->hdbc != SQL_NULL_HDBC) { rc = SQLFreeStmt(imp_sth->hstmt, SQL_CLOSE);/* TBD: 3.0 update */ if (!SQL_SUCCEEDED(rc)) { dbd_error(sth, rc, "finish/SQLFreeStmt(SQL_CLOSE)"); return 0; } if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 6)) { TRACE0(imp_dbh, " dbd_st_finish closed query:\n"); } } DBIc_ACTIVE_off(imp_sth); return 1; } void dbd_st_destroy(SV *sth, imp_sth_t *imp_sth) { dTHX; D_imp_dbh_from_sth; RETCODE rc; /* Free contents of imp_sth */ /* PerlIO_printf(DBIc_LOGPIO(imp_dbh), " dbd_st_destroy\n"); */ Safefree(imp_sth->fbh); Safefree(imp_sth->RowBuffer); Safefree(imp_sth->ColNames); Safefree(imp_sth->statement); if (imp_sth->out_params_av) sv_free((SV*)imp_sth->out_params_av); if (imp_sth->param_status_array) { Safefree(imp_sth->param_status_array); imp_sth->param_status_array = NULL; } if (imp_sth->all_params_hv) { HV *hv = imp_sth->all_params_hv; SV *sv; char *key; I32 retlen; hv_iterinit(hv); while( (sv = hv_iternextsv(hv, &key, &retlen)) != NULL ) { if (sv != &PL_sv_undef) { phs_t *phs_tpl = (phs_t*)(void*)SvPVX(sv); sv_free(phs_tpl->sv); if (phs_tpl->strlen_or_ind_array) { Safefree(phs_tpl->strlen_or_ind_array); phs_tpl->strlen_or_ind_array = NULL; } if (phs_tpl->param_array_buf) { Safefree(phs_tpl->param_array_buf); phs_tpl->param_array_buf = NULL; } } } sv_free((SV*)imp_sth->all_params_hv); } if (imp_sth->param_status_array) { Safefree(imp_sth->param_status_array); imp_sth->param_status_array = NULL; } /* SQLxxx functions dump core when no connection exists. This happens * when the db was disconnected before perl ending. Hence, * checking for the dirty flag. 
*/ if (imp_dbh->hdbc != SQL_NULL_HDBC && !PL_dirty) { rc = SQLFreeHandle(SQL_HANDLE_STMT, imp_sth->hstmt); if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 5)) TRACE1(imp_dbh, " SQLFreeHandle(stmt)=%d\n", rc); if (!SQL_SUCCEEDED(rc)) { dbd_error(sth, rc, "st_destroy/SQLFreeHandle(stmt)"); /* return 0; */ } } DBIc_IMPSET_off(imp_sth); /* let DBI know we've done it */ } /************************************************************************/ /* */ /* get_param_type */ /* ============== */ /* */ /* Sets the following fields for a parameter in the phs_st: */ /* */ /* sql_type - the SQL type to use when binding this parameter */ /* describe_param_called - set to 1 if we called SQLDescribeParam */ /* describe_param_status - set to result of SQLDescribeParam if */ /* SQLDescribeParam called */ /* described_sql_type - the sql type returned by SQLDescribeParam */ /* param_size - the parameter size returned by SQLDescribeParam */ /* */ /* The sql_type field is set to one of the following: */ /* value passed in bind method call if specified */ /* if SQLDescribeParam not supported: */ /* value of odbc_default_bind_type attribute if set else */ /* SQL_VARCHAR */ /* if SQLDescribeParam supported: */ /* if SQLDescribeParam succeeds: */ /* parameter type returned by SQLDescribeParam */ /* else if SQLDescribeParam fails: */ /* value of odbc_default_bind_type attribute if set else */ /* SQL_VARCHAR */ /* */ /* NOTE: Just because an ODBC driver says it supports SQLDescribeParam */ /* does not mean you can call it successfully e.g., MS SQL Server */ /* implements SQLDescribeParam by examining your SQL and rewriting it */ /* to be a select statement so it can find the column types etc. This */ /* fails horribly when the statement does not contain a table */ /* e.g., "select ?, LEN(?)" and so do most other SQL Server drivers. */ /* */ /************************************************************************/ static void get_param_type( SV *sth, imp_sth_t *imp_sth, imp_dbh_t *imp_dbh, phs_t *phs) { SWORD fNullable; SWORD ibScale; RETCODE rc; if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) TRACE2(imp_sth, " +get_param_type(%p,%s)\n", sth, phs->name); if (imp_sth->odbc_force_bind_type != 0) { phs->sql_type = imp_sth->odbc_force_bind_type; if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) TRACE1(imp_dbh, " forced param type to %d\n", phs->sql_type); } else if (imp_dbh->odbc_sqldescribeparam_supported != 1) { /* As SQLDescribeParam is not supported by the ODBC driver we need to default a SQL type to bind the parameter as. The default is either the value set with odbc_default_bind_type or a fallback of SQL_VARCHAR/SQL_WVARCHAR depending on your data and whether we are unicode build. */ phs->sql_type = default_parameter_type( "SQLDescribeParam not supported", imp_sth, phs); } else if (!imp_sth->odbc_describe_parameters) { phs->sql_type = default_parameter_type( "SQLDescribeParam disabled", imp_sth, phs); } else if (!phs->describe_param_called) { /* If we haven't had a go at calling SQLDescribeParam before for this parameter, have a go now. 
If it fails we'll default the sql type as above when driver does not have SQLDescribeParam */ rc = SQLDescribeParam(imp_sth->hstmt, phs->idx, &phs->described_sql_type, &phs->param_size, &ibScale, &fNullable); phs->describe_param_called = 1; phs->describe_param_status = rc; if (!SQL_SUCCEEDED(rc)) { if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) TRACE1(imp_dbh, " Parameter %d\n", phs->idx); phs->sql_type = default_parameter_type( "SQLDescribeParam failed", imp_sth, phs); /* show any odbc errors in log */ AllODBCErrors(imp_sth->henv, imp_sth->hdbc, imp_sth->hstmt, DBIc_TRACE(imp_sth, DBD_TRACING, 0, 3), DBIc_LOGPIO(imp_sth)); } else if (phs->described_sql_type == 0) { /* unknown SQL type */ /* pretend it failed */ phs->describe_param_status = SQL_ERROR; phs->sql_type = default_parameter_type( "SQLDescribeParam returned unknown SQL type", imp_sth, phs); } else { if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 5)) PerlIO_printf(DBIc_LOGPIO(imp_dbh), " SQLDescribeParam %s: SqlType=%s(%d) " "param_size=%ld Scale=%d Nullable=%d\n", phs->name, S_SqlTypeToString(phs->described_sql_type), phs->described_sql_type, (unsigned long)phs->param_size, ibScale, fNullable); /* * for non-integral numeric types, let the driver/database handle * the conversion for us */ switch(phs->described_sql_type) { case SQL_NUMERIC: case SQL_DECIMAL: case SQL_FLOAT: case SQL_REAL: case SQL_DOUBLE: if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 5)) TRACE3(imp_dbh, " Param %s is numeric SQL type %s " "(param size:%lu) changed to SQL_VARCHAR\n", phs->name, S_SqlTypeToString(phs->described_sql_type), (unsigned long)phs->param_size); phs->sql_type = SQL_VARCHAR; break; default: { check_for_unicode_param(imp_sth, phs); break; } } } } else if (phs->describe_param_called) { if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 5)) TRACE1(imp_dbh, " SQLDescribeParam already run and returned rc=%d\n", phs->describe_param_status); check_for_unicode_param(imp_sth, phs); } if (phs->requested_type != 0) { phs->sql_type = phs->requested_type; if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 5)) TRACE1(imp_dbh, " Overriding sql type with requested type %d\n", phs->requested_type); } #if defined(WITH_UNICODE) /* for Unicode string types, change value_type to SQL_C_WCHAR*/ switch (phs->sql_type) { case SQL_WCHAR: case SQL_WVARCHAR: case SQL_WLONGVARCHAR: case MS_SQLS_XML_TYPE: /* SQL Server XML Type */ phs->value_type = SQL_C_WCHAR; if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 8)) { TRACE0(imp_dbh, " get_param_type: modified value type to SQL_C_WCHAR\n"); } break; } #endif /* WITH_UNICODE */ if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 8)) TRACE0(imp_dbh, " -get_param_type\n"); } /*======================================================================*/ /* */ /* rebind_param */ /* ============ */ /* */ /*======================================================================*/ static int rebind_param( pTHX_ SV *sth, imp_sth_t *imp_sth, imp_dbh_t *imp_dbh, phs_t *phs) { SQLRETURN rc; SQLULEN default_column_size; STRLEN value_len = 0; /* args of SQLBindParameter() call */ SQLSMALLINT param_io_type; /* SQL_PARAM_INPUT_OUTPUT || SQL_PARAM_INPUT */ SQLSMALLINT value_type; /* C data type of parameter */ UCHAR *value_ptr; /* ptr to actual parameter data */ SQLULEN column_size; /* size of column/expression of the parameter */ SQLSMALLINT d_digits; /* decimal digits of parameter */ SQLLEN buffer_length; /* length in bytes of parameter buffer */ SQLLEN strlen_or_ind; /* parameter length or indicator */ if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) { PerlIO_printf( DBIc_LOGPIO(imp_dbh), " +rebind_param 
%s %.100s (size SvCUR=%"UVuf"/SvLEN=%"UVuf"/max=%"IVdf") " "svtype:%u, value type:%d, sql type:%d\n", phs->name, neatsvpv(phs->sv, 0), SvOK(phs->sv) ? (UV)SvCUR(phs->sv) : -1, SvOK(phs->sv) ? (UV)SvLEN(phs->sv) : -1 ,phs->maxlen, SvTYPE(phs->sv), phs->value_type, phs->sql_type); } if (phs->is_inout) { /* * At the moment we always do sv_setsv() and rebind. * Later we may optimise this so that more often we can * just copy the value & length over and not rebind. */ if (SvREADONLY(phs->sv)) Perl_croak(aTHX_ "%s", PL_no_modify); /* phs->sv _is_ the real live variable, it may 'mutate' later */ /* pre-upgrade high to reduce risk of SvPVX realloc/move */ (void)SvUPGRADE(phs->sv, SVt_PVNV); /* ensure room for result, 28 is magic number (see sv_2pv) */ #if defined(WITH_UNICODE) SvGROW(phs->sv, (phs->maxlen + sizeof(SQLWCHAR) < 28) ? 28 : phs->maxlen + sizeof(SQLWCHAR)); #else SvGROW(phs->sv, (phs->maxlen < 28) ? 28 : phs->maxlen+1); #endif /* WITH_UNICODE */ phs->svok = SvOK(phs->sv); } else { /* phs->sv is copy of real variable, upgrade to at least string */ (void)SvUPGRADE(phs->sv, SVt_PV); } /* * At this point phs->sv must be at least a PV with a valid buffer, * even if it's undef (null) */ if (SvOK(phs->sv)) { phs->sv_buf = SvPV(phs->sv, value_len); } else { /* it's undef but if it was inout param it would point to a * valid buffer, at least */ phs->sv_buf = SvPVX(phs->sv); value_len = 0; } get_param_type(sth, imp_sth, imp_dbh, phs); #if defined(WITH_UNICODE) if (phs->value_type == SQL_C_WCHAR) { if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 8)) { TRACE1(imp_dbh, " Need to modify phs->sv in place: old length = %lu\n", value_len); } /* Convert the sv in place to UTF-16 encoded characters NOTE: the SV_toWCHAR may modify SvPV(phs->sv */ if (SvOK(phs->sv)) { SV_toWCHAR(aTHX_ phs->sv); /* get new buffer and length */ phs->sv_buf = SvPV(phs->sv, value_len); } else { /* it is undef */ /* need a valid buffer at least */ phs->sv_buf = SvPVX(phs->sv); value_len = 0; } if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 8)) { TRACE1(imp_dbh, " Need to modify phs->sv in place: new length = %lu\n", value_len); } } /* value_len has current value length now */ phs->sv_type = SvTYPE(phs->sv); /* part of mutation check */ phs->maxlen = SvLEN(phs->sv) - sizeof(SQLWCHAR); /* avail buffer space */ #else /* !WITH_UNICODE */ /* value_len has current value length now */ phs->sv_type = SvTYPE(phs->sv); /* part of mutation check */ phs->maxlen = SvLEN(phs->sv) - 1; /* avail buffer space */ #endif /* WITH_UNICODE */ if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) { PerlIO_printf( DBIc_LOGPIO(imp_dbh), " bind %s %.100s value_len=%"UVuf" maxlen=%ld null=%d)\n", phs->name, neatsvpv(phs->sv, value_len), (UV)value_len,(long)phs->maxlen, SvOK(phs->sv) ? 0 : 1); } /* * JLU: was SQL_PARAM_OUTPUT only, but that caused a problem with * Oracle's drivers and in/out parameters. Can't be output only * with Oracle. */ param_io_type = phs->is_inout ? SQL_PARAM_INPUT_OUTPUT : SQL_PARAM_INPUT; value_type = phs->value_type; d_digits = value_len; column_size = phs->is_inout ? phs->maxlen : value_len; /* per patch from Paul G. Weiss, who was experiencing re-preparing * of queries when the size of the bound string's were increasing * for example select * from tabtest where name = ? * then executing with 'paul' and then 'thomas' would cause * SQLServer to prepare the query twice, but if we ran 'thomas' * then 'paul', it would not re-prepare the query. The key seems * to be allocating enough space for the largest parameter. 
* TBD: the default for this should be a DBD::ODBC specific option * or attribute. */ if (phs->sql_type == SQL_VARCHAR && !phs->is_inout) { d_digits = 0; /* default to at least 80 if this is the first time through */ if (phs->biggestparam == 0) { phs->biggestparam = (value_len > 80) ? value_len : 80; } else { /* bump up max, if needed */ if (value_len > phs->biggestparam) { phs->biggestparam = value_len; } } } if ((phs->describe_param_called == 1) && (SQL_SUCCEEDED(phs->describe_param_status)) && (phs->requested_type == 0)) { /* type not overriden */ default_column_size = phs->param_size; } else if (phs->is_inout) { default_column_size = phs->maxlen; } else { if (phs->sql_type == SQL_VARCHAR) { default_column_size = phs->biggestparam; } else { default_column_size = value_len; } } /* Default buffer_length to specified output length or actual input length */ buffer_length = phs->is_inout ? phs->maxlen : value_len; /* When we fill a LONGVARBINARY, the CTYPE must be set to SQL_C_BINARY */ if (value_type == SQL_C_CHAR) { /* could be changed by bind_plh */ d_digits = 0; /* not relevent to char types */ switch(phs->sql_type) { case SQL_LONGVARBINARY: case SQL_BINARY: case SQL_VARBINARY: value_type = SQL_C_BINARY; column_size = default_column_size; break; #ifdef SQL_WLONGVARCHAR case SQL_WLONGVARCHAR: /* added for SQLServer 7 ntext type */ #endif case SQL_CHAR: case SQL_VARCHAR: case SQL_LONGVARCHAR: column_size = default_column_size; break; case SQL_DATE: case SQL_TYPE_DATE: case SQL_TIME: case SQL_TYPE_TIME: break; case SQL_TIMESTAMP: case SQL_TYPE_TIMESTAMP: d_digits = 0; /* tbd: millisecondS?) */ if (SvOK(phs->sv)) { /* Work out decimal digits value from milliseconds */ char *cp; if (phs->sv_buf && *phs->sv_buf) { cp = strchr(phs->sv_buf, '.'); if (cp) { ++cp; while (*cp != '\0' && isdigit(*cp)) { cp++; d_digits++; } } } } /* * 23 is YYYY-MM-DD HH:MM:SS.sss * We have to be really careful here to maintain the column size * whether we are passing NULL/undef or not as the ODBC driver * only has the values we pass to SQLBindParameter to go on and * cannot know until execute time whether we are passing a NULL or * not (i.e., although we pass the strlen_or_ind value - last arg to * SQLBindParameter with a length or SQL_NULL_DATA, this is a ptr * arg and the driver cannot look at it until execute time). * We may know we are going to pass a NULL but if we reduce the * the column size to 0 (or as this function used to do - 1) the * driver might decide it is not a full datetime and decide to * bind as a smalldatetime etc. In fact there is a test for * MS SQL Server in 20SqlServer which binds a datetime and passes * a NULL, a full datetime and lastly a NULL and if we don't maintain * 23 for the first NULL MS SQL Server decides it is a smalldatetime * and we lose the SS.sss in any full datetime passed later. */ column_size = 23; break; default: break; } } else if ( value_type == SQL_C_WCHAR) { d_digits = 0; } if (!SvOK(phs->sv)) { strlen_or_ind = SQL_NULL_DATA; /* if is_inout, shouldn't we null terminate the buffer and send * it, instead?? */ if (!phs->is_inout) { /* * We have to be really careful here to maintain the column size * whether we are passing NULL/undef or not as the ODBC driver * only has the values we pass to SQLBindParameter to go on and * cannot know until execute time whether we are passing a NULL or * not (i.e., although we pass the strlen_or_ind value - last arg to * SQLBindParameter with a length or SQL_NULL_DATA, this is a ptr * arg and the driver cannot look at it until execute time). 
* We may know we are going to pass a NULL but if we reduce the * the column size to 0 (or as this function used to do - 1) the * driver might decide it is a different type (e.g., smalldatetime * instead of datetime). * In fact there is a test for MS SQL Server in 20SqlServer which * binds a datetime and passes a NULL, a full datetime and lastly a * NULL and if we don't maintain the column_size for the first NULL * MS SQL Server decides it is a smalldatetime and we lose the * SS.sss in any full datetime passed later despite setting a correct * column_size and decimal digits. */ /* column_size = 1; Used to be this but see comment above */ /* * However, at this stage we could have column_size of 0 and * that is no good either or we'll get invalid precision */ if (column_size == 0) column_size = 1; } if (phs->is_inout) { if (!phs->sv_buf) { croak("panic: DBD::ODBC binding undef with bad buffer!!!!"); } /* just in case, we *know* we called SvGROW above */ phs->sv_buf[0] = '\0'; /* patch for binding undef inout params on sql server */ d_digits = 1; value_ptr = phs->sv_buf; } else { value_ptr = NULL; } } else { value_ptr = phs->sv_buf; strlen_or_ind = value_len; /* not undef, may be a blank string or something */ if (!phs->is_inout && strlen_or_ind == 0) { column_size = 1; } } if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) { PerlIO_printf(DBIc_LOGPIO(imp_dbh), " bind %s value_type:%d %s cs=%lu dd=%d bl=%ld\n", phs->name, value_type, S_SqlTypeToString(phs->sql_type), (unsigned long)column_size, d_digits, buffer_length); } if (value_len < imp_sth->odbc_putdata_start) { /* already set and should be left alone JLU */ /* d_digits = value_len; */ } else { SQLLEN vl = value_len; if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) TRACE1(imp_dbh, " using data_at_exec for size %lu\n", (unsigned long)value_len); d_digits = 0; /* not relevant to lobs */ strlen_or_ind = SQL_LEN_DATA_AT_EXEC(vl); value_ptr = (UCHAR*) phs; } #if THE_FOLLOWING_CODE_IS_FLAWED_AND_BROKEN /* * value_ptr is not null terminated - it is a byte array so PVallocW * won't work as it works on null terminated strings */ #if defined(WITH_UNICODE) if (value_type==SQL_C_WCHAR) { char * c1; c1 = PVallocW((SQLWCHAR *)value_ptr); TRACE1(imp_dbh, " Param value = L'%s'\n", c1); PVfreeW(c1); } #endif /* WITH_UNICODE */ #endif /* * The following code is a workaround for a problem in SQL Server * when inserting more than 400K into varbinary(max) or varchar(max) * columns. The older SQL Server driver (not the native client driver): * * o reports the size of xxx(max) columns as 2147483647 bytes in size * when in reality they can be a lot bigger than that. * o if you bind more than 400K you get the following errors: * (HY000, 0, [Microsoft][ODBC SQL Server Driver] * Warning: Partial insert/update. The insert/update of a text or * image column(s) did not succeed.) * (42000, 7125, [Microsoft][ODBC SQL Server Driver][SQL Server] * The text, ntext, or image pointer value conflicts with the column * name specified.) * * There appear to be 2 workarounds but I was not prepared to do the first. * The first is simply to set the indicator to SQL_LEN_DATA_AT_EXEC(409600) * if the parameter was larger than 409600 - miraculously it works but * shouldn't according to MSDN. * The second workaround (used here) is to set the indicator to * SQL_LEN_DATA_AT_EXEC(0) and the buffer_length to 0. 
* */ if ((imp_dbh->driver_type == DT_SQL_SERVER) && ((phs->sql_type == SQL_LONGVARCHAR) || (phs->sql_type == SQL_LONGVARBINARY) || (phs->sql_type == SQL_WLONGVARCHAR)) && /*(column_size == 2147483647) && (strlen_or_ind < 0) &&*/ ((-strlen_or_ind + SQL_LEN_DATA_AT_EXEC_OFFSET) >= 409600)) { strlen_or_ind = SQL_LEN_DATA_AT_EXEC(0); buffer_length = 0; } #if defined(WITH_UNICODE) /* * rt43384 - MS Access does not seem to like us binding parameters as * wide characters and then SQLBindParameter column_size to byte length. * e.g., if you have a text(255) column and try and insert 190 ascii chrs * then the unicode enabled version of DBD::ODBC will convert those 190 * ascii chrs to wide chrs and hence double the size to 380. If you pass * 380 to Access for column_size it just returns an invalid precision * value. This changes to column_size to chrs instead of bytes but * only if column_size is not reduced to 0 - which also produces * an access error e.g., in the empty string '' case. */ else if (((imp_dbh->driver_type == DT_MS_ACCESS_JET) || (imp_dbh->driver_type == DT_MS_ACCESS_ACE)) && (value_type == SQL_C_WCHAR) && (column_size > 1)) { column_size = column_size / 2; if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) TRACE0(imp_dbh, " MSAccess - setting chrs not bytes\n"); } #endif /* * workaround bug in SQL Server ODBC driver where it can describe some * parameters (especially in SQL using sub selects) the wrong way. * If this is a varchar then the column_size must be at least as big * as the buffer size but if SQL Server associated the wrong column with * our parameter it could get a totally different size. Without this * a varchar(10) column can be desribed as a varchar(n) where n is less * than 10 and this leads to data truncation errors - see rt 39841. */ if (((imp_dbh->driver_type == DT_SQL_SERVER) || (imp_dbh->driver_type == DT_SQL_SERVER_NATIVE_CLIENT)) && (phs->sql_type == SQL_VARCHAR) && (column_size < buffer_length)) { column_size = buffer_length; } /* * Yet another workaround for SQL Server native client. * If you have a varbinary(max), varchar(max) or nvarchar(max) you have to * pass 0 for the column_size or you get HY104 "Invalid precision value". * See rt_38977.t which causes this. * The versions of native client I've seen this with are: * 2007.100.1600.22 sqlncli10.dll driver version = ? * 2005.90.1399.00 SQLNCLI.DLL driver version = 09.00.1399 * * Update, for nvarchar(max) it does not seem to simply be a driver issue * as with the Easysoft SQL Server ODBC Driver going to Microsoft SQL Server * 09.00.1399 we got the following error for all sizes between 4001 and 8000 * (inclusive). * [SQL Server]The size (4001) given to the parameter '@P1' exceeds the * maximum allowed (4000) * * Update, see RT100186 - same applies to VARBINARY(MAX) * * So to sum up for the native client when the parameter size is 0 or * when the database is sql server and wchar and sql type not overwritten * we need to use column size 0. We cannot do this if the requested_type * was specified as if someone specifies a bind type we haven't called * SQLDescribeParam and it looks like param_size = 0 even when it is * not a xxx(max). e.g., the 40UnicodeRoundTrip tests will fail with * MS SQL Server because they override the type. 
*/ if ((phs->param_size == 0) && (SQL_SUCCEEDED(phs->describe_param_status)) && (imp_sth->odbc_describe_parameters)) { /* SQLDescribeParam not disabled */ /* no point in believing param_size = 0 if SQLDescribeParam failed */ /* See rt 55736 */ if ((imp_dbh->driver_type == DT_SQL_SERVER_NATIVE_CLIENT) || ((strcmp(imp_dbh->odbc_dbms_name, "Microsoft SQL Server") == 0) && ((phs->sql_type == SQL_WVARCHAR) || (phs->sql_type == SQL_VARBINARY)) && (phs->requested_type == 0))) { column_size = 0; } } /* for rt_38977 we get: * sloi = -500100 ps=0 sqlt=12 (SQL_VARCHAR) * sloi = -500100 ps=0 sqlt=-3 (SQL_VARBINARY) * sloi = 4001 ps=0 sqlt=-9 (SQL_WVARCHAR) <--- this one fails without above */ /*printf("sloi = %d ps=%d sqlt=%d\n", strlen_or_ind, phs->param_size, phs->sql_type);*/ /* * Avoid calling SQLBindParameter again if nothing has changed. * Why, because a) there is no point and b) MS SQL Server will * re-prepare the statement. */ /* phs' copy of strlen_or_ind is permanently allocated and the other strlen_or_ind is an automatic variable and won't survive this func but needs to. */ phs->strlen_or_ind = strlen_or_ind; if ((param_io_type == SQL_PARAM_INPUT_OUTPUT) || (!phs->bp_value_ptr) || /* not bound before */ ((param_io_type == SQL_PARAM_INPUT) && /* input parameter */ ((value_ptr != phs->bp_value_ptr) || (value_type != phs->value_type) || (column_size != phs->bp_column_size) || (d_digits != phs->bp_d_digits) || (buffer_length != phs->bp_buffer_length)))) { if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 5)) { PerlIO_printf( DBIc_LOGPIO(imp_dbh), " SQLBindParameter: idx=%d: io_type=%d, name=%s, " "value_type=%d (%s), SQLType=%d (%s), column_size=%lu, " "d_digits=%d, value_ptr=%p, buffer_length=%ld, ind=%ld, " "param_size=%lu\n", phs->idx, param_io_type, phs->name, value_type, S_SqlCTypeToString(value_type), phs->sql_type, S_SqlTypeToString(phs->sql_type), (unsigned long)column_size, d_digits, value_ptr, (long)buffer_length, (long)strlen_or_ind, (unsigned long)phs->param_size); /* avoid tracing data_at_exec as value_ptr will point to phs */ if ((value_type == SQL_C_CHAR) && (strlen_or_ind > 0)) { TRACE1(imp_sth, " Param value = %s\n", value_ptr); } } #ifdef FRED printf("SQLBindParameter idx=%d pt=%d vt=%d, st=%d, cs=%lu dd=%d vp=%p bl=%ld slorind=%ld %s\n", phs->idx, param_io_type, value_type, phs->sql_type, (unsigned long)column_size, d_digits, value_ptr, buffer_length, (long)phs->strlen_or_ind, value_ptr); #endif rc = SQLBindParameter(imp_sth->hstmt, phs->idx, param_io_type, value_type, phs->sql_type, column_size, d_digits, value_ptr, buffer_length, &phs->strlen_or_ind); if (!SQL_SUCCEEDED(rc)) { dbd_error(sth, rc, "rebind_param/SQLBindParameter"); phs->bp_value_ptr = NULL; return 0; } phs->bp_value_ptr = value_ptr; phs->value_type = value_type; phs->bp_column_size = column_size; phs->bp_d_digits = d_digits; phs->bp_buffer_length = buffer_length; } else if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 5)) { TRACE1(imp_sth, " Not rebinding param %d - no change\n", phs->idx); } if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) TRACE0(imp_dbh, " -rebind_param\n"); return 1; } int dbd_st_bind_col( SV *sth, imp_sth_t *imp_sth, SV *col, SV *ref, IV type, SV *attribs) { dTHX; int field; if (!SvIOK(col)) { croak ("Invalid column number") ; } field = SvIV(col); if ((field < 1) || (field > DBIc_NUM_FIELDS(imp_sth))) { croak("cannot bind to non-existent field %d", field); } /* Don't allow anyone to change the bound type after the column is bound or horrible things could happen e.g., say you just call SQLBindCol without a type it 
will probably default to SQL_C_CHAR but if you later called bind_col specifying SQL_INTEGER the code here would interpret the buffer as a 4 byte integer but in reality it would be written as a char*. We issue a warning but don't change the actual type. */ if (imp_sth->fbh[field-1].bound && type && imp_sth->fbh[field-1].bound != type) { DBIh_SET_ERR_CHAR( sth, (imp_xxh_t*)imp_sth, "0", 0, "you cannot change the bind column type after the column is bound", "", "fetch"); return 1; } /* The first problem we have is that SQL_xxx values DBI defines are not the same as SQL_C_xxx values we pass the SQLBindCol and in some cases there is no C equivalent e.g., SQL_DECIMAL - there is no C type for these. The second problem we have is that values passed to SQLBindCol cause the ODBC driver to return different C types OR structures e.g., SQL_NUMERIC returns a structure. We're not binding columns as C structures as they are too hard to convert into Perl scalars - we'll just use SQL_C_CHAR/SQL_C_WCHAR for these. There is an exception for timestamps as the code later will bind as a timestamp if it spots the column is a timestamp and pull the structure apart. We do however store the requested type if it SQL_DOUBLE/SQL_NUMERIC so we can use it with sql_type_cast_svpv i.e., if you know the column is a double or numeric we still retrieve it as a char string but then if DiscardString or StrictlyTyped if specified we'lll call sql_type_cast_svpv. */ if (type == SQL_DOUBLE || type == SQL_NUMERIC) { imp_sth->fbh[field-1].req_type = type; } if (attribs) { /* attributes are sticky */ imp_sth->fbh[field-1].bind_flags = 0; /* default to none */ } /* DBIXS 13590 added StrictlyTyped and DiscardString attributes */ if (attribs) { SV **svp; DBD_ATTRIBS_CHECK("dbd_st_bind_col", sth, attribs); if (DBD_ATTRIB_TRUE(attribs, "TreatAsLOB", 10, svp)) { imp_sth->fbh[field-1].bind_flags |= ODBC_TREAT_AS_LOB; } #if DBIXS_REVISION >= 13590 if (DBD_ATTRIB_TRUE(attribs, "StrictlyTyped", 13, svp)) { imp_sth->fbh[field-1].bind_flags |= DBIstcf_STRICT; } if (DBD_ATTRIB_TRUE(attribs, "DiscardString", 13, svp)) { imp_sth->fbh[field-1].bind_flags |= DBIstcf_DISCARD_STRING; } #endif /* DBIXS_REVISION >= 13590 */ } if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) { TRACE3(imp_sth, " bind_col %d requested type:%"IVdf", flags:%lx\n", field, imp_sth->fbh[field-1].req_type, imp_sth->fbh[field-1].bind_flags); } return 1; } /*------------------------------------------------------------ * bind placeholder. * Is called from ODBC.xs execute() * AND from ODBC.xs bind_param() */ int dbd_bind_ph( SV *sth, imp_sth_t *imp_sth, SV *ph_namesv, SV *newvalue, IV in_sql_type, SV *attribs, int is_inout, IV maxlen) { dTHX; SV **phs_svp; STRLEN name_len; char *name; char namebuf[30]; phs_t *phs; D_imp_dbh_from_sth; SQLSMALLINT sql_type; if (SQL_NULL_HDBC == imp_dbh->hdbc) { DBIh_SET_ERR_CHAR(sth, (imp_xxh_t*)imp_sth, Nullch, 1, "Database handle has been disconnected", Nullch, Nullch); return -2; } sql_type = (SQLSMALLINT)in_sql_type; if (SvNIOK(ph_namesv) ) { /* passed as a number */ name = namebuf; my_snprintf(name, sizeof(namebuf), "%d", (int)SvIV(ph_namesv)); name_len = strlen(name); } else { name = SvPV(ph_namesv, name_len); } if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) { PerlIO_printf( DBIc_LOGPIO(imp_dbh), " +dbd_bind_ph(%p, name=%s, value=%.200s, attribs=%s, " "sql_type=%d(%s), is_inout=%d, maxlen=%"IVdf"\n", sth, name, SvOK(newvalue) ? neatsvpv(newvalue, 0) : "undef", attribs ? 
SvPV_nolen(attribs) : "", sql_type, S_SqlTypeToString(sql_type), is_inout, maxlen); } /* the problem with the code below is we are getting SVt_PVLV when * an "undef" value from a hash lookup that doesn't exist. It's an * "undef" value, but it doesn't come in as a scalar. * from a hash is arriving. Let's leave this out until we are * handling arrays. JLU 7/12/02 */ #if 0 if (SvTYPE(newvalue) > SVt_PVMG) { /* hook for later array logic */ if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 3)) TRACE2(imp_sth, " !!bind %s perl type = %d -- croaking!\n", name, SvTYPE(newvalue)); croak("Can't bind non-scalar value (currently)"); } #endif if (SvROK(newvalue) && !SvAMAGIC(newvalue)) { croak("Cannot bind a plain reference"); } /* * all_params_hv created during dbd_preparse. */ phs_svp = hv_fetch(imp_sth->all_params_hv, name, (I32)name_len, 0); if (phs_svp == NULL) croak("Can't bind unknown placeholder '%s'", name); phs = (phs_t*)SvPVX(*phs_svp); /* placeholder struct */ if (phs->sv == &PL_sv_undef) { /* first bind for this placeholder */ if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) TRACE0(imp_sth, " First bind of this placeholder\n"); phs->value_type = SQL_C_CHAR; /* default */ phs->requested_type = sql_type; /* save type requested */ phs->maxlen = maxlen; /* 0 if not inout */ phs->is_inout = is_inout; if (is_inout) { /* TO_DO then later sv is tested not to be newvalue !!! */ phs->sv = SvREFCNT_inc(newvalue); /* point to live var */ imp_sth->has_inout_params++; /* build array of phs's so we can deal with out vars fast */ if (!imp_sth->out_params_av) imp_sth->out_params_av = newAV(); av_push(imp_sth->out_params_av, SvREFCNT_inc(*phs_svp)); } } else { if (sql_type) { /* parameter attributes are supposed to be sticky until overriden so only replace requested_type if sql_type specified. See https://rt.cpan.org/Ticket/Display.html?id=46597 */ phs->requested_type = sql_type; /* save type requested */ } if (is_inout != phs->is_inout) { croak("Can't rebind or change param %s in/out mode after first bind " "(%d => %d)", phs->name, phs->is_inout, is_inout); } if (maxlen && maxlen > phs->maxlen) { if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) PerlIO_printf(DBIc_LOGPIO(imp_dbh), "!attempt to change param %s maxlen (%"IVdf"->%"IVdf")\n", phs->name, phs->maxlen, maxlen); croak("Can't change param %s maxlen (%"IVdf"->%"IVdf") after first bind", phs->name, phs->maxlen, maxlen); } } if (!is_inout) { /* normal bind to take a (new) copy of current value */ if (phs->sv == &PL_sv_undef) /* (first time bind) */ phs->sv = newSV(0); sv_setsv(phs->sv, newvalue); if (SvAMAGIC(phs->sv)) /* if it has any magic force to string */ sv_pvn_force(phs->sv, &PL_na); } else if (newvalue != phs->sv) { if (phs->sv) { if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) TRACE0(imp_sth, " Decrementing ref count on placeholder\n"); SvREFCNT_dec(phs->sv); } phs->sv = SvREFCNT_inc(newvalue); /* point to live var */ } if (imp_dbh->odbc_defer_binding) { get_param_type(sth, imp_sth, imp_dbh, phs); if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) TRACE0(imp_dbh, " -dbd_bind_ph=1\n"); return 1; } /* fall through for "immediate" binding */ if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) TRACE0(imp_dbh, " -dbd_bind_ph=rebind_param\n"); return rebind_param(aTHX_ sth, imp_sth, imp_dbh, phs); } /*------------------------------------------------------------ * blob_read: * read part of a BLOB from a table. 
* XXX needs more thought */ int dbd_st_blob_read(sth, imp_sth, field, offset, len, destrv, destoffset) SV *sth; imp_sth_t *imp_sth; int field; long offset; long len; SV *destrv; long destoffset; { dTHX; SQLLEN retl; SV *bufsv; RETCODE rc; croak("blob_read not supported yet"); bufsv = SvRV(destrv); sv_setpvn(bufsv,"",0); /* ensure it's writable string */ SvGROW(bufsv, len+destoffset+1); /* SvGROW doesn't do +1 */ /* XXX for this to work be probably need to avoid calling SQLGetData in * fetch. The definition of SQLGetData doesn't work well with the DBI's * notion of how LongReadLen would work. Needs more thought. */ rc = SQLGetData(imp_sth->hstmt, (SQLSMALLINT)(field+1), SQL_C_BINARY, ((UCHAR *)SvPVX(bufsv)) + destoffset, len, &retl ); if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) PerlIO_printf( DBIc_LOGPIO(imp_sth), "SQLGetData(...,off=%ld, len=%ld)->rc=%d,len=%ld SvCUR=%"UVuf"\n", destoffset, len, rc, (long)retl, (UV)SvCUR(bufsv)); if (!SQL_SUCCEEDED(rc)) { dbd_error(sth, rc, "dbd_st_blob_read/SQLGetData"); return 0; } if (rc == SQL_SUCCESS_WITH_INFO) { /* XXX should check for 01004 */ retl = len; } if (retl == SQL_NULL_DATA) { /* field is null */ (void)SvOK_off(bufsv); return 1; } #ifdef SQL_NO_TOTAL if (retl == SQL_NO_TOTAL) { /* unknown length! */ (void)SvOK_off(bufsv); return 0; } #endif SvCUR_set(bufsv, destoffset+retl); *SvEND(bufsv) = '\0'; /* consistent with perl sv_setpvn etc */ if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) TRACE1(imp_sth, " blob_read: SvCUR=%"UVuf"\n", (UV)SvCUR(bufsv)); return 1; } /*======================================================================*/ /* */ /* S_db_storeOptions */ /* ================= */ /* S_db_fetchOptions */ /* ================= */ /* */ /* An array of options/attributes we support on database handles for */ /* storing and fetching. 
*/ /* */ /*======================================================================*/ enum paramdir { PARAM_READ = 1, PARAM_WRITE = 2, PARAM_READWRITE = 3 }; enum gettype {PARAM_TYPE_CUSTOM = 0, PARAM_TYPE_UINT, PARAM_TYPE_STR, PARAM_TYPE_BOOL}; typedef struct { const char *str; UWORD fOption; enum paramdir dir; enum gettype type; UDWORD atrue; UDWORD afalse; } db_params; static db_params S_db_options[] = { { "AutoCommit", SQL_AUTOCOMMIT, PARAM_READWRITE, PARAM_TYPE_BOOL, SQL_AUTOCOMMIT_ON, SQL_AUTOCOMMIT_OFF }, { "ReadOnly", SQL_ATTR_ACCESS_MODE, PARAM_READWRITE, PARAM_TYPE_BOOL, SQL_MODE_READ_ONLY, SQL_MODE_READ_WRITE}, { "RowCacheSize", ODBC_ROWCACHESIZE, PARAM_READ, PARAM_TYPE_CUSTOM }, #if 0 /* not defined by DBI/DBD specification */ { "TRANSACTION", SQL_ACCESS_MODE, PARAM_READWRITE, PARAM_TYPE_BOOL, SQL_MODE_READ_ONLY, SQL_MODE_READ_WRITE }, { "solid_timeout", SQL_LOGIN_TIMEOUT, PARAM_READWRITE, PARAM_TYPE_UINT }, { "ISOLATION", PARAM_READWRITE, PARAM_TYPE_UINT, SQL_TXN_ISOLATION }, #endif { "odbc_SQL_DBMS_NAME", SQL_DBMS_NAME, PARAM_READ, PARAM_TYPE_CUSTOM, }, { "odbc_SQL_DRIVER_ODBC_VER", SQL_DRIVER_ODBC_VER, PARAM_READ, PARAM_TYPE_CUSTOM }, { "odbc_SQL_ROWSET_SIZE", SQL_ROWSET_SIZE, PARAM_READWRITE, PARAM_TYPE_UINT }, { "odbc_ignore_named_placeholders", ODBC_IGNORE_NAMED_PLACEHOLDERS, PARAM_READWRITE, PARAM_TYPE_CUSTOM }, { "odbc_default_bind_type", ODBC_DEFAULT_BIND_TYPE, PARAM_READWRITE, PARAM_TYPE_CUSTOM }, { "odbc_force_bind_type", ODBC_FORCE_BIND_TYPE, PARAM_READWRITE, PARAM_TYPE_CUSTOM }, { "odbc_force_rebind", ODBC_FORCE_REBIND, PARAM_READWRITE, PARAM_TYPE_CUSTOM }, { "odbc_async_exec", ODBC_ASYNC_EXEC, PARAM_READWRITE, PARAM_TYPE_CUSTOM }, { "odbc_err_handler", ODBC_ERR_HANDLER, PARAM_READWRITE, PARAM_TYPE_CUSTOM }, { "odbc_exec_direct", ODBC_EXEC_DIRECT, PARAM_READWRITE, PARAM_TYPE_CUSTOM }, { "odbc_version", ODBC_VERSION, PARAM_READWRITE, PARAM_TYPE_CUSTOM }, { "odbc_cursortype", ODBC_CURSORTYPE, PARAM_READWRITE, PARAM_TYPE_CUSTOM }, { "odbc_query_timeout", ODBC_QUERY_TIMEOUT, PARAM_READWRITE, PARAM_TYPE_CUSTOM }, { "odbc_putdata_start", ODBC_PUTDATA_START, PARAM_READWRITE, PARAM_TYPE_CUSTOM }, { "odbc_column_display_size", ODBC_COLUMN_DISPLAY_SIZE, PARAM_READWRITE, PARAM_TYPE_CUSTOM }, { "odbc_utf8_on", ODBC_UTF8_ON, PARAM_READWRITE, PARAM_TYPE_CUSTOM }, { "odbc_has_unicode", ODBC_HAS_UNICODE, PARAM_READ, PARAM_TYPE_CUSTOM }, { "odbc_out_connect_string", ODBC_OUTCON_STR, PARAM_READ, PARAM_TYPE_CUSTOM}, { "odbc_describe_parameters", ODBC_DESCRIBE_PARAMETERS, PARAM_READWRITE, PARAM_TYPE_CUSTOM }, { "odbc_batch_size", ODBC_BATCH_SIZE, PARAM_READWRITE, PARAM_TYPE_CUSTOM }, { "odbc_array_operations", ODBC_ARRAY_OPERATIONS, PARAM_READWRITE, PARAM_TYPE_CUSTOM }, { "odbc_taf_callback", ODBC_TAF_CALLBACK, PARAM_READWRITE, PARAM_TYPE_CUSTOM }, {"odbc_trace", SQL_ATTR_TRACE, PARAM_READWRITE, PARAM_TYPE_BOOL, SQL_OPT_TRACE_ON, SQL_OPT_TRACE_OFF}, {"odbc_trace_file", SQL_ATTR_TRACEFILE, PARAM_READWRITE, PARAM_TYPE_STR, }, { NULL }, }; /*======================================================================*/ /* */ /* S_dbOption */ /* ========== */ /* */ /* Given a string and a length, locate this option in the specified */ /* array of valid options. Typically used by STORE and FETCH methods */ /* to decide if this option/attribute is supported by us. 
*/ /* */ /*======================================================================*/ static const db_params * S_dbOption(const db_params *pars, char *key, STRLEN len) { /* search option to set */ while (pars->str != NULL) { if (strncmp(pars->str, key, len) == 0 && len == strlen(pars->str)) break; pars++; } if (pars->str == NULL) { return NULL; } return pars; } /*======================================================================*/ /* */ /* dbd_db_STORE_attrib */ /* =================== */ /* */ /* This function handles: */ /* */ /* $dbh->{$key} = $value */ /* */ /* Method to handle the setting of driver specific attributes and DBI */ /* attributes AutoCommit and ChopBlanks (no other DBI attributes). */ /* */ /* Return TRUE if the attribute was handled, else FALSE. */ /* */ /*======================================================================*/ int dbd_db_STORE_attrib(SV *dbh, imp_dbh_t *imp_dbh, SV *keysv, SV *valuesv) { dTHX; RETCODE rc; STRLEN kl; char *key = SvPV(keysv,kl); int on; SQLPOINTER vParam; const db_params *pars; SQLINTEGER attr_length = SQL_IS_UINTEGER; int bSetSQLConnectionOption; if ((pars = S_dbOption(S_db_options, key, kl)) == NULL) { if (DBIc_TRACE(imp_dbh, DBD_TRACING, 0, 3)) TRACE1(imp_dbh, " !!DBD::ODBC unsupported attribute passed (%s)\n", key); return FALSE; } else if (!(pars->dir & PARAM_WRITE)) { if (DBIc_TRACE(imp_dbh, DBD_TRACING, 0, 3)) TRACE1(imp_dbh, " !!DBD::ODBC attempt to set non-writable attribute (%s)\n", key); return FALSE; } else if (DBIc_TRACE(imp_dbh, DBD_TRACING, 0, 3)) { TRACE1(imp_dbh, " setting %s\n", key); } bSetSQLConnectionOption = TRUE; switch(pars->fOption) { case SQL_ATTR_LOGIN_TIMEOUT: case SQL_ATTR_TXN_ISOLATION: case SQL_ROWSET_SIZE: /* not ODBC 3 */ vParam = (SQLPOINTER)SvIV(valuesv); break; case SQL_ATTR_TRACE: if (SvTRUE(valuesv)) { vParam = (SQLPOINTER)pars->atrue; } else { vParam = (SQLPOINTER)pars->afalse; } break; case SQL_ATTR_TRACEFILE: vParam = (SQLPOINTER) SvPV_nolen(valuesv); attr_length = SQL_NTS; break; case ODBC_IGNORE_NAMED_PLACEHOLDERS: bSetSQLConnectionOption = FALSE; /* * set value to ignore placeholders. Will affect all * statements from here on. */ imp_dbh->odbc_ignore_named_placeholders = SvTRUE(valuesv); break; case ODBC_ARRAY_OPERATIONS: bSetSQLConnectionOption = FALSE; /* * set value to ignore placeholders. Will affect all * statements from here on. */ imp_dbh->odbc_array_operations = SvTRUE(valuesv); break; case ODBC_DEFAULT_BIND_TYPE: bSetSQLConnectionOption = FALSE; /* * set value of default bind type. Default is SQL_VARCHAR, * but setting to 0 will cause SQLDescribeParam to be used. */ imp_dbh->odbc_default_bind_type = (SQLSMALLINT)SvIV(valuesv); break; case ODBC_FORCE_BIND_TYPE: bSetSQLConnectionOption = FALSE; /* * set value of the forced bind type. 
Default is 0 * which means the bind type is not forced to be anything - * we will use SQLDescribeParam or fall back on odbc_default_bind_type */ imp_dbh->odbc_force_bind_type = (SQLSMALLINT)SvIV(valuesv); break; case ODBC_FORCE_REBIND: bSetSQLConnectionOption = FALSE; /* * set value to force rebind */ imp_dbh->odbc_force_rebind = SvTRUE(valuesv); break; case ODBC_QUERY_TIMEOUT: bSetSQLConnectionOption = FALSE; imp_dbh->odbc_query_timeout = (SQLINTEGER)SvIV(valuesv); break; case ODBC_PUTDATA_START: bSetSQLConnectionOption = FALSE; imp_dbh->odbc_putdata_start = SvIV(valuesv); break; case ODBC_BATCH_SIZE: bSetSQLConnectionOption = FALSE; imp_dbh->odbc_batch_size = SvIV(valuesv); if (imp_dbh->odbc_batch_size == 0) { croak("You cannot set odbc_batch_size to zero"); } break; case ODBC_TAF_CALLBACK: bSetSQLConnectionOption = FALSE; if (!SvOK(valuesv)) { rc = SQLSetConnectAttr(imp_dbh->hdbc, 1280 /*SQL_ATTR_REGISTER_TAF_CALLBACK */, NULL, SQL_IS_POINTER); if (!SQL_SUCCEEDED(rc)) { dbd_error(dbh, rc, "SQLSetConnectAttr for odbc_taf_callback"); return FALSE; } } else if (!SvROK(valuesv) || (SvTYPE(SvRV(valuesv)) != SVt_PVCV)) { croak("Need a code reference for odbc_taf_callback"); } else { SvREFCNT_inc(valuesv); imp_dbh->odbc_taf_callback = valuesv; rc = SQLSetConnectAttr(imp_dbh->hdbc, 1280 /*SQL_ATTR_REGISTER_TAF_CALLBACK */, &taf_callback_wrapper, SQL_IS_POINTER); if (!SQL_SUCCEEDED(rc)) { dbd_error(dbh, rc, "SQLSetConnectAttr for odbc_taf_callback"); return FALSE; } /* Pass our dbh into the callback */ rc = SQLSetConnectAttr(imp_dbh->hdbc, 1281 /*SQL_ATTR_REGISTER_TAF_HANDLE*/, dbh, SQL_IS_POINTER); if (!SQL_SUCCEEDED(rc)) { dbd_error(dbh, rc, "SQLSetConnectAttr for odbc_taf_callback handle"); return FALSE; } } break; case ODBC_COLUMN_DISPLAY_SIZE: bSetSQLConnectionOption = FALSE; imp_dbh->odbc_column_display_size = SvIV(valuesv); break; case ODBC_UTF8_ON: bSetSQLConnectionOption = FALSE; imp_dbh->odbc_utf8_on = SvIV(valuesv); break; case ODBC_EXEC_DIRECT: bSetSQLConnectionOption = FALSE; /* * set value of odbc_exec_direct. Non-zero will * make prepare, essentially a noop and make execute * use SQLExecDirect. This is to support drivers that * _only_ support SQLExecDirect. */ imp_dbh->odbc_exec_direct = SvTRUE(valuesv); break; case ODBC_DESCRIBE_PARAMETERS: bSetSQLConnectionOption = FALSE; imp_dbh->odbc_describe_parameters = SvTRUE(valuesv); break; case ODBC_ASYNC_EXEC: bSetSQLConnectionOption = FALSE; /* * set asynchronous execution. It can only be turned on if * the driver supports it, but will fail silently. */ if (SvTRUE(valuesv)) { /* Only bother setting the attribute if it's not already set! */ if (imp_dbh->odbc_async_exec) break; /* * Determine which method of async execution this * driver allows -- per-connection or per-statement */ rc = SQLGetInfo(imp_dbh->hdbc, SQL_ASYNC_MODE, &imp_dbh->odbc_async_type, sizeof(imp_dbh->odbc_async_type), NULL); /* * Normally, we'd do a if (!SQL_ok(rc)) ... here. * Unfortunately, if the driver doesn't support async * mode, it may return an error here. There doesn't * seem to be any other way to check (other than doing * a special check for the SQLSTATE). We'll just default * to doing nothing and not bother checking errors. */ if (imp_dbh->odbc_async_type == SQL_AM_CONNECTION){ /* * Driver has per-connection async option. Set it * now in the dbh. 
*/ if (DBIc_TRACE(imp_dbh, DBD_TRACING, 0, 4)) TRACE0(imp_dbh, " Supported AsyncType is SQL_AM_CONNECTION\n"); rc = SQLSetConnectAttr(imp_dbh->hdbc, SQL_ATTR_ASYNC_ENABLE, (SQLPOINTER)SQL_ASYNC_ENABLE_ON, SQL_IS_UINTEGER); if (!SQL_SUCCEEDED(rc)) { dbd_error(dbh, rc, "db_STORE/SQLSetConnectAttr"); return FALSE; } imp_dbh->odbc_async_exec = 1; } else if (imp_dbh->odbc_async_type == SQL_AM_STATEMENT){ /* * Driver has per-statement async option. Just set * odbc_async_exec and the rest will be handled by * dbd_st_prepare. */ if (DBIc_TRACE(imp_dbh, DBD_TRACING, 0, 4)) TRACE0(imp_dbh, " Supported AsyncType is SQL_AM_STATEMENT\n"); imp_dbh->odbc_async_exec = 1; } else { /* (imp_dbh->odbc_async_type == SQL_AM_NONE) */ /* * We're out of luck. */ if (DBIc_TRACE(imp_dbh, DBD_TRACING, 0, 4)) TRACE0(imp_dbh, " Supported AsyncType is SQL_AM_NONE\n"); imp_dbh->odbc_async_exec = 0; return FALSE; } } else { /* Only bother turning it off if it was previously set... */ if (imp_dbh->odbc_async_exec == 1) { /* We only need to do anything here if odbc_async_type is * SQL_AM_CONNECTION since the per-statement async type * is turned on only when the statement handle is created. */ if (imp_dbh->odbc_async_type == SQL_AM_CONNECTION){ rc = SQLSetConnectAttr(imp_dbh->hdbc, SQL_ATTR_ASYNC_ENABLE, (SQLPOINTER)SQL_ASYNC_ENABLE_OFF, SQL_IS_UINTEGER); if (!SQL_SUCCEEDED(rc)) { dbd_error(dbh, rc, "db_STORE/SQLSetConnectAttr"); return FALSE; } } } imp_dbh->odbc_async_exec = 0; } break; case ODBC_ERR_HANDLER: bSetSQLConnectionOption = FALSE; /* This was taken from DBD::Sybase 0.21 */ /* I believe the following if test which has been in DBD::ODBC * for ages is wrong and should (at least now) use SvOK or * it is impossible to reset the error handler * * if(valuesv == &PL_sv_undef) { * imp_dbh->odbc_err_handler = NULL; */ if (!SvOK(valuesv)) { imp_dbh->odbc_err_handler = NULL; } else if(imp_dbh->odbc_err_handler == (SV*)NULL) { imp_dbh->odbc_err_handler = newSVsv(valuesv); } else { sv_setsv(imp_dbh->odbc_err_handler, valuesv); } break; case ODBC_VERSION: /* set only in connect, nothing to store */ bSetSQLConnectionOption = FALSE; break; case ODBC_CURSORTYPE: /* set only in connect, nothing to store */ bSetSQLConnectionOption = FALSE; break; case SQL_ATTR_ACCESS_MODE: on = SvTRUE(valuesv); vParam = (SQLPOINTER)(on ? pars->atrue : pars->afalse); break; default: on = SvTRUE(valuesv); vParam = (SQLPOINTER)(on ? 
pars->atrue : pars->afalse); break; } if (bSetSQLConnectionOption) { rc = SQLSetConnectAttr(imp_dbh->hdbc, pars->fOption, vParam, attr_length); if (!SQL_SUCCEEDED(rc)) { dbd_error(dbh, rc, "db_STORE/SQLSetConnectAttr"); return FALSE; } else if ((SQL_SUCCESS_WITH_INFO == rc) && (pars->fOption == SQL_ATTR_ACCESS_MODE)) { char state[SQL_SQLSTATE_SIZE+1]; SQLINTEGER native; char msg[256]; SQLSMALLINT msg_len; /* If we attempted to set SQL_ATTR_ACCESS_MODE, save the result to return from FETCH, even if it didn't work */ if (vParam == (SQLPOINTER)pars->atrue) { imp_dbh->read_only = 1; } else { imp_dbh->read_only = 0; } (void)SQLGetDiagRec(SQL_HANDLE_DBC, imp_dbh->hdbc, 1, (SQLCHAR *)state, &native, msg, sizeof(msg), &msg_len); DBIh_SET_ERR_CHAR( dbh, (imp_xxh_t*)imp_dbh, "0" /* warning state */, 1, msg, state, Nullch); } if (pars->fOption == SQL_ROWSET_SIZE) imp_dbh->rowset_size = (SQLULEN)vParam; /* keep our flags in sync */ if (kl == 10 && strEQ(key, "AutoCommit")) DBIc_set(imp_dbh, DBIcf_AutoCommit, SvTRUE(valuesv)); } return TRUE; } /*======================================================================*/ /* */ /* dbd_db_FETCH_attrib */ /* =================== */ /* */ /* Counterpart of dbd_db_STORE_attrib handing: */ /* */ /* $value = $dbh->{$key}; */ /* */ /* returns an "SV" with the value */ /* */ /*======================================================================*/ SV *dbd_db_FETCH_attrib(SV *dbh, imp_dbh_t *imp_dbh, SV *keysv) { dTHX; RETCODE rc; STRLEN kl; char *key = SvPV(keysv,kl); const db_params *pars; SV *retsv = Nullsv; /* checking pars we need FAST */ if (DBIc_TRACE(imp_dbh, DBD_TRACING, 0, 8)) TRACE1(imp_dbh, " FETCH %s\n", key); if ((pars = S_dbOption(S_db_options, key, kl)) == NULL) return Nullsv; if (!(pars->dir & PARAM_READ)) return Nullsv; switch (pars->fOption) { case ODBC_OUTCON_STR: if (!imp_dbh->out_connect_string) { retsv = &PL_sv_undef; } else { retsv = newSVsv(imp_dbh->out_connect_string); } break; case SQL_DRIVER_ODBC_VER: retsv = newSVpv(imp_dbh->odbc_ver, 0); break; case SQL_DBMS_NAME: retsv = newSVpv(imp_dbh->odbc_dbms_name, 0); break; case ODBC_IGNORE_NAMED_PLACEHOLDERS: retsv = newSViv(imp_dbh->odbc_ignore_named_placeholders); break; case ODBC_ARRAY_OPERATIONS: retsv = newSViv(imp_dbh->odbc_array_operations); break; case ODBC_QUERY_TIMEOUT: /* * fetch current value of query timeout * * -1 is our internal flag saying odbc_query_timeout has never been * set so we map it back to the default for ODBC which is 0 */ if (imp_dbh->odbc_query_timeout == -1) { retsv = newSViv(0); } else { retsv = newSViv(imp_dbh->odbc_query_timeout); } break; case ODBC_PUTDATA_START: retsv = newSViv(imp_dbh->odbc_putdata_start); break; case ODBC_BATCH_SIZE: retsv = newSViv(imp_dbh->odbc_batch_size); break; case ODBC_COLUMN_DISPLAY_SIZE: retsv = newSViv(imp_dbh->odbc_column_display_size); break; case ODBC_UTF8_ON: retsv = newSViv(imp_dbh->odbc_utf8_on); break; case ODBC_HAS_UNICODE: retsv = newSViv(imp_dbh->odbc_has_unicode); break; case ODBC_DEFAULT_BIND_TYPE: retsv = newSViv(imp_dbh->odbc_default_bind_type); break; case ODBC_FORCE_BIND_TYPE: retsv = newSViv(imp_dbh->odbc_force_bind_type); break; case ODBC_FORCE_REBIND: retsv = newSViv(imp_dbh->odbc_force_rebind); break; case ODBC_EXEC_DIRECT: retsv = newSViv(imp_dbh->odbc_exec_direct); break; case ODBC_DRIVER_COMPLETE: retsv = newSViv(imp_dbh->odbc_driver_complete); break; case ODBC_DESCRIBE_PARAMETERS: retsv = newSViv(imp_dbh->odbc_describe_parameters); break; case ODBC_ASYNC_EXEC: /* * fetch current value of asynchronous 
execution (should be * either 0 or 1). */ retsv = newSViv(imp_dbh->odbc_async_exec); break; case ODBC_ERR_HANDLER: /* fetch current value of the error handler (a coderef). */ if(imp_dbh->odbc_err_handler) { retsv = newSVsv(imp_dbh->odbc_err_handler); } else { retsv = &PL_sv_undef; } break; case ODBC_ROWCACHESIZE: retsv = newSViv(imp_dbh->RowCacheSize); break; default: { enum gettype type = pars->type; char strval[256]; SQLUINTEGER uval = 0; SQLINTEGER retstrlen; if ((pars->fOption == SQL_ATTR_ACCESS_MODE) && (imp_dbh->read_only != -1)) { retsv = newSViv(imp_dbh->read_only); break; } /* * The remainders we support are ODBC attributes like * odbc_SQL_ROWSET_SIZE (SQL_ROWSET_SIZE), odbc_trace etc * * Nothing else should get here for now unless any item is added * to S_db_fetchOptions. */ if (type == PARAM_TYPE_UINT || type == PARAM_TYPE_BOOL) { rc = SQLGetConnectAttr( imp_dbh->hdbc, pars->fOption, &uval, SQL_IS_UINTEGER, NULL); } else if (type == PARAM_TYPE_STR) { rc = SQLGetConnectAttr( imp_dbh->hdbc, pars->fOption, strval, sizeof(strval), &retstrlen); } else { if (DBIc_TRACE(imp_dbh, DBD_TRACING, 0, 3)) TRACE2(imp_dbh, " !!unknown type %d for %s in dbd_db_FETCH\n", type, key); return Nullsv; } if (!SQL_SUCCEEDED(rc)) { if (DBIc_TRACE(imp_dbh, DBD_TRACING, 0, 3)) TRACE1(imp_dbh, " !!SQLGetConnectAttr=%d in dbd_db_FETCH\n", rc); AllODBCErrors(imp_dbh->henv, imp_dbh->hdbc, 0, 0, DBIc_LOGPIO(imp_dbh)); return Nullsv; } if (type == PARAM_TYPE_UINT) { retsv = newSViv(uval); } else if (type == PARAM_TYPE_BOOL) { if (uval == pars->atrue) retsv = newSViv(1); else retsv = newSViv(0); } else if (type == PARAM_TYPE_STR) { retsv = newSVpv(strval, retstrlen); } break; } /* end of default */ } /* outer switch */ return sv_2mortal(retsv); } /*======================================================================*/ /* */ /* S_st_fetch_params */ /* ================= */ /* S_st_store_params */ /* ================= */ /* */ /* An array of options/attributes we support on statement handles for */ /* storing and fetching. */ /* */ /*======================================================================*/ /* * added "need_describe" flag to handle the situation where you don't * have a result set yet to describe. Certain attributes don't need * the result set to operate, hence don't do a describe unless you need * to do one. 
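 * In Perl terms: metadata attributes such as NAME, TYPE, PRECISION and
 * SCALE only make sense once there is a result set to describe, whereas
 * e.g. odbc_query_timeout does not need one; illustratively (table and
 * column names are placeholders):
 *
 *   my $sth = $dbh->prepare('select a, b from some_table');
 *   $sth->execute;
 *   my @names   = @{ $sth->{NAME} };          # triggers a describe
 *   my $timeout = $sth->{odbc_query_timeout}; # does not
 *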
* DBD::ODBC 0.45_15 * */ typedef struct { const char *str; unsigned len:8; unsigned array:1; unsigned need_describe:1; unsigned filler:22; } T_st_params; #define s_A(str,need_describe) { str, sizeof(str)-1,0,need_describe } static T_st_params S_st_fetch_params[] = { s_A("NUM_OF_PARAMS",1), /* 0 */ s_A("NUM_OF_FIELDS",1), /* 1 */ s_A("NAME",1), /* 2 */ s_A("NULLABLE",1), /* 3 */ s_A("TYPE",1), /* 4 */ s_A("PRECISION",1), /* 5 */ s_A("SCALE",1), /* 6 */ s_A("sol_type",1), /* 7 */ s_A("sol_length",1), /* 8 */ s_A("CursorName",1), /* 9 */ s_A("odbc_more_results",1), /* 10 */ s_A("ParamValues",0), /* 11 */ s_A("LongReadLen",0), /* 12 */ s_A("odbc_ignore_named_placeholders",0), /* 13 */ s_A("odbc_default_bind_type",0), /* 14 */ s_A("odbc_force_rebind",0), /* 15 */ s_A("odbc_query_timeout",0), /* 16 */ s_A("odbc_putdata_start",0), /* 17 */ s_A("ParamTypes",0), /* 18 */ s_A("odbc_column_display_size",0), /* 19 */ s_A("odbc_force_bind_type",0), /* 20 */ s_A("odbc_batch_size",0), /* 21 */ s_A("odbc_array_operations",0), /* 22 */ s_A("",0), /* END */ }; static T_st_params S_st_store_params[] = { s_A("odbc_ignore_named_placeholders",0), /* 0 */ s_A("odbc_default_bind_type",0), /* 1 */ s_A("odbc_force_rebind",0), /* 2 */ s_A("odbc_query_timeout",0), /* 3 */ s_A("odbc_putdata_start",0), /* 4 */ s_A("odbc_column_display_size",0), /* 5 */ s_A("odbc_force_bind_type",0), /* 6 */ s_A("odbc_batch_size",0), /* 7 */ s_A("odbc_array_operations",0), /* 8 */ s_A("",0), /* END */ }; #undef s_A /*======================================================================*/ /* */ /* dbd_st_FETCH_attrib */ /* =================== */ /* */ /*======================================================================*/ SV *dbd_st_FETCH_attrib(SV *sth, imp_sth_t *imp_sth, SV *keysv) { dTHX; STRLEN kl; char *key = SvPV(keysv,kl); int i; SV *retsv = NULL; T_st_params *par; char cursor_name[256]; SWORD cursor_name_len; RETCODE rc; for (par = S_st_fetch_params; par->len > 0; par++) if (par->len == kl && strEQ(key, par->str)) break; if (par->len <= 0) return Nullsv; if (par->need_describe && !imp_sth->done_desc && !dbd_describe(sth, imp_sth,0)) { /* dbd_describe has already called dbd_error() */ /* we can't return Nullsv here because the xs code will */ /* then just pass the attribute name to DBI for FETCH. 
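 * From Perl the attribute then simply comes back undefined, so a
 * defensive caller checks for that rather than assuming a reference is
 * always returned, e.g. (illustrative):
 *
 *   my $names = $sth->{NAME};
 *   warn 'column metadata unavailable' unless defined $names;
 *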
*/ if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) { TRACE1(imp_sth, " !!!dbd_st_FETCH_attrib (%s) needed query description, " "but failed\n", par->str); } if (DBIc_WARN(imp_sth)) { warn("Describe failed during %s->FETCH(%s,%d)", SvPV(sth,PL_na), key,imp_sth->done_desc); } return &PL_sv_undef; } i = DBIc_NUM_FIELDS(imp_sth); switch(par - S_st_fetch_params) { AV *av; case 0: /* NUM_OF_PARAMS */ return Nullsv; /* handled by DBI */ case 1: /* NUM_OF_FIELDS */ if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 9)) { TRACE1(imp_sth, " dbd_st_FETCH_attrib NUM_OF_FIELDS %d\n", i); } retsv = newSViv(i); break; case 2: /* NAME */ av = newAV(); retsv = newRV_inc(sv_2mortal((SV*)av)); if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 9)) { int j; TRACE1(imp_sth, " dbd_st_FETCH_attrib NAMES %d\n", i); for (j = 0; j < i; j++) TRACE1(imp_sth, "\t%s\n", imp_sth->fbh[j].ColName); } while(--i >= 0) { if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 9)) { TRACE2(imp_sth, " Colname %d => %s\n", i, imp_sth->fbh[i].ColName); } #ifdef WITH_UNICODE av_store(av, i, sv_newwvn(aTHX_ (SQLWCHAR *)imp_sth->fbh[i].ColName, imp_sth->fbh[i].ColNameLen)); #else av_store(av, i, newSVpv(imp_sth->fbh[i].ColName, 0)); #endif } break; case 3: /* NULLABLE */ av = newAV(); retsv = newRV_inc(sv_2mortal((SV*)av)); while(--i >= 0) av_store(av, i, (imp_sth->fbh[i].ColNullable == SQL_NO_NULLS) ? &PL_sv_no : &PL_sv_yes); break; case 4: /* TYPE */ av = newAV(); retsv = newRV_inc(sv_2mortal((SV*)av)); while(--i >= 0) av_store(av, i, newSViv(imp_sth->fbh[i].ColSqlType)); break; case 5: /* PRECISION */ av = newAV(); retsv = newRV_inc(sv_2mortal((SV*)av)); while(--i >= 0) av_store(av, i, newSViv(imp_sth->fbh[i].ColDef)); break; case 6: /* SCALE */ av = newAV(); retsv = newRV_inc(sv_2mortal((SV*)av)); while(--i >= 0) av_store(av, i, newSViv(imp_sth->fbh[i].ColScale)); break; case 7: /* sol_type */ av = newAV(); retsv = newRV_inc(sv_2mortal((SV*)av)); while(--i >= 0) av_store(av, i, newSViv(imp_sth->fbh[i].ColSqlType)); break; case 8: /* sol_length */ av = newAV(); retsv = newRV_inc(sv_2mortal((SV*)av)); while(--i >= 0) av_store(av, i, newSViv(imp_sth->fbh[i].ColLength)); break; case 9: /* CursorName */ rc = SQLGetCursorName(imp_sth->hstmt, cursor_name, sizeof(cursor_name), &cursor_name_len); if (!SQL_SUCCEEDED(rc)) { dbd_error(sth, rc, "st_FETCH/SQLGetCursorName"); return Nullsv; } retsv = newSVpv(cursor_name, cursor_name_len); break; case 10: /* odbc_more_results */ retsv = newSViv(imp_sth->moreResults); if (i == 0 && imp_sth->moreResults == 0) { int outparams = (imp_sth->out_params_av) ? AvFILL(imp_sth->out_params_av)+1 : 0; if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) { TRACE0(imp_sth, " numfields == 0 && moreResults = 0 finish\n"); } if (outparams) { odbc_handle_outparams(aTHX_ imp_sth, DBIc_TRACE_LEVEL(imp_sth)); } imp_sth->done_desc = 0; /* redo describe */ /* XXX need to 'finish' here */ dbd_st_finish(sth, imp_sth); } else { if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) { TRACE2(imp_sth, " fetch odbc_more_results, numfields == %d " "&& moreResults = %d\n", i, imp_sth->moreResults); } } break; case 11: /* ParamValues */ { /* not sure if there's a memory leak here. 
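 * The hash is rebuilt on every FETCH and handed back as a reference, so
 * from Perl the current bindings can be inspected roughly like this
 * (illustrative; positional placeholders are keyed by number):
 *
 *   $sth->bind_param(1, 'abc');
 *   my %values = %{ $sth->{ParamValues} };  # roughly ( 1 => 'abc' )
 *   my %types  = %{ $sth->{ParamTypes} };   # values are { TYPE => ... }
 *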
*/ HV *paramvalues = newHV(); if (imp_sth->all_params_hv) { HV *hv = imp_sth->all_params_hv; SV *sv; char *key; I32 retlen; hv_iterinit(hv); while( (sv = hv_iternextsv(hv, &key, &retlen)) != NULL ) { if (sv != &PL_sv_undef) { phs_t *phs = (phs_t*)(void*)SvPVX(sv); (void)hv_store(paramvalues, phs->name, (I32)strlen(phs->name), newSVsv(phs->sv), 0); } } } /* ensure HV is freed when the ref is freed */ retsv = newRV_noinc((SV *)paramvalues); break; } case 12: /* LongReadLen */ retsv = newSViv(DBIc_LongReadLen(imp_sth)); break; case 13: /* odbc_ignore_named_placeholders */ retsv = newSViv(imp_sth->odbc_ignore_named_placeholders); break; case 14: /* odbc_default_bind_type */ retsv = newSViv(imp_sth->odbc_default_bind_type); break; case 15: /* odbc_force_rebind */ retsv = newSViv(imp_sth->odbc_force_rebind); break; case 16: /* odbc_query_timeout */ /* * -1 is our internal flag saying odbc_query_timeout has never been * set so we map it back to the default for ODBC which is 0 */ if (imp_sth->odbc_query_timeout == -1) { retsv = newSViv(0); } else { retsv = newSViv(imp_sth->odbc_query_timeout); } break; case 17: /* odbc_putdata_start */ retsv = newSViv(imp_sth->odbc_putdata_start); break; case 18: /* ParamTypes */ { /* not sure if there's a memory leak here. */ HV *paramtypes = newHV(); if (imp_sth->all_params_hv) { HV *hv = imp_sth->all_params_hv; SV *sv; char *key; I32 retlen; hv_iterinit(hv); while( (sv = hv_iternextsv(hv, &key, &retlen)) != NULL ) { if (sv != &PL_sv_undef) { HV *subh = newHV(); phs_t *phs = (phs_t*)(void*)SvPVX(sv); (void)hv_store(subh, "TYPE", 4, newSViv(phs->sql_type), 0); (void)hv_store(paramtypes, phs->name, (I32)strlen(phs->name), newRV_noinc((SV *)subh), 0); } } } /* ensure HV is freed when the ref is freed */ retsv = newRV_noinc((SV *)paramtypes); break; } case 19: /* odbc_column_display_size */ retsv = newSViv(imp_sth->odbc_column_display_size); break; case 20: /* odbc_force_bind_type */ retsv = newSViv(imp_sth->odbc_force_bind_type); break; case 21: /* odbc_batch_size */ retsv = newSViv(imp_sth->odbc_batch_size); break; case 22: /* odbc_array_operations */ retsv = newSViv(imp_sth->odbc_array_operations); break; default: return Nullsv; } return sv_2mortal(retsv); } /*======================================================================*/ /* */ /* dbd_st_STORE_attrib */ /* =================== */ /* */ /*======================================================================*/ int dbd_st_STORE_attrib(SV *sth, imp_sth_t *imp_sth, SV *keysv, SV *valuesv) { dTHX; STRLEN kl; char *key = SvPV(keysv,kl); T_st_params *par; for (par = S_st_store_params; par->len > 0; par++) if (par->len == kl && strEQ(key, par->str)) break; if (par->len <= 0) return FALSE; switch(par - S_st_store_params) { case 0: imp_sth->odbc_ignore_named_placeholders = SvTRUE(valuesv); return TRUE; case 1: imp_sth->odbc_default_bind_type = (SQLSMALLINT)SvIV(valuesv); return TRUE; break; case 2: imp_sth->odbc_force_rebind = (int)SvIV(valuesv); return TRUE; break; case 3: imp_sth->odbc_query_timeout = SvIV(valuesv); return TRUE; break; case 4: imp_sth->odbc_putdata_start = SvIV(valuesv); return TRUE; break; case 5: imp_sth->odbc_column_display_size = SvIV(valuesv); return TRUE; break; case 6: imp_sth->odbc_force_bind_type = (SQLSMALLINT)SvIV(valuesv); return TRUE; break; case 7: imp_sth->odbc_batch_size = SvIV(valuesv); if (imp_sth->odbc_batch_size == 0) { croak("You cannot set odbc_batch_size to zero"); } return TRUE; break; case 8: imp_sth->odbc_array_operations = SvTRUE(valuesv); return TRUE; } return FALSE; 
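/*
 * The statement-level attributes listed in S_st_store_params are set the
 * same way from Perl and override the connection-level defaults for that
 * one handle; an illustrative sketch (the SQL is a placeholder):
 *
 *   my $sth = $dbh->prepare('select slow_thing from big_table');
 *   $sth->{odbc_query_timeout} = 30;     # seconds, this handle only
 *   $sth->{odbc_putdata_start} = 65536;  # see odbc_putdata_start above
 *   $sth->execute;
 */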
} SV *odbc_get_info(dbh, ftype) SV *dbh; int ftype; { dTHX; D_imp_dbh(dbh); RETCODE rc; SV *retsv = NULL; int i; int size = 256; char *rgbInfoValue; SWORD cbInfoValue = -2; New(0, rgbInfoValue, size, char); /* See fancy logic below */ for (i = 0; i < 6; i++) rgbInfoValue[i] = (char)0xFF; rc = SQLGetInfo(imp_dbh->hdbc, (SQLUSMALLINT)ftype, rgbInfoValue, (SQLSMALLINT)(size-1), &cbInfoValue); if (cbInfoValue > size-1) { Renew(rgbInfoValue, cbInfoValue+1, char); rc = SQLGetInfo(imp_dbh->hdbc, (SQLUSMALLINT)ftype, rgbInfoValue, cbInfoValue, &cbInfoValue); } if (!SQL_SUCCEEDED(rc)) { dbd_error(dbh, rc, "odbc_get_info/SQLGetInfo"); Safefree(rgbInfoValue); /* patched 2/12/02, thanks to Steffen Goldner */ return &PL_sv_undef; /* return Nullsv; */ } /* Fancy logic here to determine if result is a string or int */ if (cbInfoValue == -2) /* is int */ retsv = newSViv(*(int *)rgbInfoValue); /* XXX cast */ else if (cbInfoValue != 2 && cbInfoValue != 4) /* must be string */ retsv = newSVpv(rgbInfoValue, 0); else if (rgbInfoValue[cbInfoValue] == '\0') /* must be string */ /* patch from Steffen Goldner 0.37 2/12/02 */ retsv = newSVpv(rgbInfoValue, 0); else if (cbInfoValue == 2) /* short */ retsv = newSViv(*(short *)rgbInfoValue); /* XXX cast */ else if (cbInfoValue == 4) /* int */ retsv = newSViv(*(int *)rgbInfoValue); /* XXX cast */ else croak("panic: SQLGetInfo cbInfoValue == %d", cbInfoValue); if (DBIc_TRACE(imp_dbh, DBD_TRACING, 0, 4)) PerlIO_printf( DBIc_LOGPIO(imp_dbh), " SQLGetInfo: ftype %d, cbInfoValue %d: %s\n", ftype, cbInfoValue, neatsvpv(retsv,0)); Safefree(rgbInfoValue); return sv_2mortal(retsv); } #ifdef THE_FOLLOWING_NO_LONGER_USED_REPLACE_BY_dbd_st_statistics int odbc_get_statistics(dbh, sth, CatalogName, SchemaName, TableName, Unique) SV * dbh; SV * sth; char * CatalogName; char * SchemaName; char * TableName; int Unique; { dTHX; D_imp_dbh(dbh); D_imp_sth(sth); RETCODE rc; int dbh_active; imp_sth->henv = imp_dbh->henv; /* needed for dbd_error */ imp_sth->hdbc = imp_dbh->hdbc; imp_sth->done_desc = 0; if ((dbh_active = check_connection_active(aTHX_ dbh)) == 0) return 0; rc = SQLAllocHandle(SQL_HANDLE_STMT, imp_dbh->hdbc, &imp_sth->hstmt); if (rc != SQL_SUCCESS) { dbd_error(sth, rc, "odbc_get_statistics/SQLAllocHandle(stmt)"); return 0; } rc = SQLStatistics(imp_sth->hstmt, CatalogName, (SQLSMALLINT)strlen(CatalogName), SchemaName, (SQLSMALLINT)strlen(SchemaName), TableName, (SQLSMALLINT)strlen(TableName), (SQLUSMALLINT)Unique, (SQLUSMALLINT)0); if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) TRACE1(imp_dbh, " SQLStatistics=%d\n", rc); if (!SQL_SUCCEEDED(rc)) { dbd_error(sth, rc, "odbc_get_statistics/SQLGetStatistics"); return 0; } return build_results(aTHX_ sth, imp_sth, dbh, imp_dbh, rc); } #endif /* THE_FOLLOWING_NO_LONGER_USED_REPLACE_BY_dbd_st_statistics */ #ifdef THE_FOLLOWING_NO_LONGER_USED_REPLACE_BY_dbd_st_primary_keys int odbc_get_primary_keys(dbh, sth, CatalogName, SchemaName, TableName) SV * dbh; SV * sth; char * CatalogName; char * SchemaName; char * TableName; { dTHX; D_imp_dbh(dbh); D_imp_sth(sth); RETCODE rc; int dbh_active; imp_sth->henv = imp_dbh->henv; /* needed for dbd_error */ imp_sth->hdbc = imp_dbh->hdbc; imp_sth->done_desc = 0; if ((dbh_active = check_connection_active(aTHX_ dbh)) == 0) return 0; rc = SQLAllocHandle(SQL_HANDLE_STMT, imp_dbh->hdbc, &imp_sth->hstmt); if (rc != SQL_SUCCESS) { dbd_error(sth, rc, "odbc_get_primary_keys/SQLAllocHandle(stmt)"); return 0; } rc = SQLPrimaryKeys(imp_sth->hstmt, CatalogName, (SQLSMALLINT)strlen(CatalogName), SchemaName, 
(SQLSMALLINT)strlen(SchemaName), TableName, (SQLSMALLINT)strlen(TableName)); if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 3)) TRACE1(imp_dbh, " SQLPrimaryKeys rc = %d\n", rc); if (!SQL_SUCCEEDED(rc)) { dbd_error(sth, rc, "odbc_get_primary_keys/SQLPrimaryKeys"); return 0; } return build_results(aTHX_ sth, imp_sth, dbh, imp_dbh, rc); } #endif /* THE_FOLLOWING_NO_LONGER_USED_REPLACE_BY_dbd_st_primary_keys */ int odbc_get_special_columns(dbh, sth, Identifier, CatalogName, SchemaName, TableName, Scope, Nullable) SV * dbh; SV * sth; int Identifier; char * CatalogName; char * SchemaName; char * TableName; int Scope; int Nullable; { dTHX; D_imp_dbh(dbh); D_imp_sth(sth); RETCODE rc; int dbh_active; imp_sth->henv = imp_dbh->henv; /* needed for dbd_error */ imp_sth->hdbc = imp_dbh->hdbc; imp_sth->done_desc = 0; if ((dbh_active = check_connection_active(aTHX_ dbh)) == 0) return 0; rc = SQLAllocHandle(SQL_HANDLE_STMT, imp_dbh->hdbc, &imp_sth->hstmt); if (rc != SQL_SUCCESS) { dbd_error(sth, rc, "odbc_get_special_columns/SQLAllocHandle(stmt)"); return 0; } rc = SQLSpecialColumns(imp_sth->hstmt, (SQLSMALLINT)Identifier, CatalogName, (SQLSMALLINT)strlen(CatalogName), SchemaName, (SQLSMALLINT)strlen(SchemaName), TableName, (SQLSMALLINT)strlen(TableName), (SQLSMALLINT)Scope, (SQLSMALLINT)Nullable); if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) TRACE1(imp_dbh, " SQLSpecialColumns=%d\n", rc); if (!SQL_SUCCEEDED(rc)) { dbd_error(sth, rc, "odbc_get_special_columns/SQLSpecialClumns"); return 0; } return build_results(aTHX_ sth, imp_sth, dbh, imp_dbh, rc); } int odbc_get_foreign_keys(dbh, sth, PK_CatalogName, PK_SchemaName, PK_TableName, FK_CatalogName, FK_SchemaName, FK_TableName) SV * dbh; SV * sth; char * PK_CatalogName; char * PK_SchemaName; char * PK_TableName; char * FK_CatalogName; char * FK_SchemaName; char * FK_TableName; { dTHX; D_imp_dbh(dbh); D_imp_sth(sth); RETCODE rc; int dbh_active; size_t max_stmt_len; imp_sth->henv = imp_dbh->henv; /* needed for dbd_error */ imp_sth->hdbc = imp_dbh->hdbc; imp_sth->done_desc = 0; if ((dbh_active = check_connection_active(aTHX_ dbh)) == 0) return 0; rc = SQLAllocHandle(SQL_HANDLE_STMT, imp_dbh->hdbc, &imp_sth->hstmt); if (rc != SQL_SUCCESS) { dbd_error(sth, rc, "odbc_get_foreign_keys/SQLAllocHandle(stmt)"); return 0; } /* just for sanity, later. Any internals that may rely on this (including */ /* debugging) will have valid data */ max_stmt_len = strlen(cSqlForeignKeys)+ strlen(XXSAFECHAR(PK_CatalogName))+ strlen(XXSAFECHAR(PK_SchemaName))+ strlen(XXSAFECHAR(PK_TableName))+ strlen(XXSAFECHAR(FK_CatalogName))+ strlen(XXSAFECHAR(FK_SchemaName))+ strlen(XXSAFECHAR(FK_TableName))+ 1; imp_sth->statement = (char *)safemalloc(max_stmt_len); my_snprintf(imp_sth->statement, max_stmt_len, cSqlForeignKeys, XXSAFECHAR(PK_CatalogName), XXSAFECHAR(PK_SchemaName), XXSAFECHAR(PK_TableName), XXSAFECHAR(FK_CatalogName), XXSAFECHAR(FK_SchemaName),XXSAFECHAR(FK_TableName) ); /* fix to handle "" (undef) calls -- thanks to Kevin Shepherd */ rc = SQLForeignKeys( imp_sth->hstmt, (PK_CatalogName && *PK_CatalogName) ? PK_CatalogName : 0, SQL_NTS, (PK_SchemaName && *PK_SchemaName) ? PK_SchemaName : 0, SQL_NTS, (PK_TableName && *PK_TableName) ? PK_TableName : 0, SQL_NTS, (FK_CatalogName && *FK_CatalogName) ? FK_CatalogName : 0, SQL_NTS, (FK_SchemaName && *FK_SchemaName) ? FK_SchemaName : 0, SQL_NTS, (FK_TableName && *FK_TableName) ? 
FK_TableName : 0, SQL_NTS); if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) TRACE1(imp_dbh, " SQLForeignKeys=%d\n", rc); if (!SQL_SUCCEEDED(rc)) { dbd_error(sth, rc, "odbc_get_foreign_keys/SQLForeignKeys"); return 0; } return build_results(aTHX_ sth, imp_sth, dbh, imp_dbh, rc); } #ifdef ODBC_NOW_DEPRECATED int odbc_describe_col( SV *sth, int colno, char *ColumnName, I16 BufferLength, I16 *NameLength, I16 *DataType, U32 *ColumnSize, I16 *DecimalDigits, I16 *Nullable) { D_imp_sth(sth); SQLULEN ColSize; RETCODE rc; rc = SQLDescribeCol(imp_sth->hstmt, (SQLSMALLINT)colno, ColumnName, BufferLength, NameLength, DataType, &ColSize, DecimalDigits, Nullable); if (!SQL_SUCCEEDED(rc)) { dbd_error(sth, rc, "DescribeCol/SQLDescribeCol"); return 0; } *ColumnSize = (U32)ColSize; return 1; } #endif /* ODBC_NOW_DEPRECATED */ int odbc_get_type_info( SV *dbh, SV *sth, int ftype) { dTHX; D_imp_dbh(dbh); D_imp_sth(sth); RETCODE rc; int dbh_active; size_t max_stmt_len; #if 0 /* TBD: cursorname? */ char cname[128]; /* cursorname */ #endif imp_sth->henv = imp_dbh->henv; /* needed for dbd_error */ imp_sth->hdbc = imp_dbh->hdbc; imp_sth->done_desc = 0; if ((dbh_active = check_connection_active(aTHX_ dbh)) == 0) return 0; rc = SQLAllocHandle(SQL_HANDLE_STMT, imp_dbh->hdbc, &imp_sth->hstmt); if (rc != SQL_SUCCESS) { dbd_error(sth, rc, "odbc_get_type_info/SQLAllocHandle(stmt)"); return 0; } /* just for sanity, later. Any internals that may rely on this (including */ /* debugging) will have valid data */ max_stmt_len = strlen(cSqlGetTypeInfo)+(abs(ftype)/10)+2; imp_sth->statement = (char *)safemalloc(max_stmt_len); my_snprintf(imp_sth->statement, max_stmt_len, cSqlGetTypeInfo, ftype); #ifdef WITH_UNICODE rc = SQLGetTypeInfoW(imp_sth->hstmt, (SQLSMALLINT)ftype); #else rc = SQLGetTypeInfo(imp_sth->hstmt, (SQLSMALLINT)ftype); #endif if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) TRACE2(imp_dbh, " SQLGetTypeInfo(%d)=%d\n", ftype, rc); dbd_error(sth, rc, "odbc_get_type_info/SQLGetTypeInfo"); if (!SQL_SUCCEEDED(rc)) { SQLFreeHandle(SQL_HANDLE_STMT,imp_sth->hstmt); imp_sth->hstmt = SQL_NULL_HSTMT; return 0; } return build_results(aTHX_ sth, imp_sth, dbh, imp_dbh, rc); } SV *odbc_cancel(SV *sth) { dTHX; D_imp_sth(sth); RETCODE rc; rc = SQLCancel(imp_sth->hstmt); if (!SQL_SUCCEEDED(rc)) { dbd_error(sth, rc, "odbc_cancel/SQLCancel"); return Nullsv; } return newSViv(1); } IV odbc_st_lob_read( SV *sth, int colno, SV *data, UV length, IV type) { dTHX; D_imp_sth(sth); SQLLEN len = 0; SQLRETURN rc; imp_fbh_t *fbh; SQLSMALLINT col_type; IV retlen = 0; char *buf = SvPV_nolen(data); fbh = &imp_sth->fbh[colno-1]; /*printf("fbh->ColSqlType=%s\n", S_SqlTypeToString(fbh->ColSqlType));*/ if ((fbh->bind_flags & ODBC_TREAT_AS_LOB) == 0) { croak("Column %d was not bound with TreatAsLOB", colno); } if ((fbh->ColSqlType == SQL_BINARY) || (fbh->ColSqlType == SQL_VARBINARY) || (fbh->ColSqlType == SQL_LONGVARBINARY)) { col_type = SQL_C_BINARY; } else { #ifdef WITH_UNICODE col_type = SQL_C_WCHAR; #else col_type = SQL_C_CHAR; #endif /* WITH_UNICODE */ } if (type != 0) { col_type = (SQLSMALLINT)type; } rc = SQLGetData(imp_sth->hstmt, colno, col_type, buf, length, &len); if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) PerlIO_printf(DBIc_LOGPIO(imp_sth), " SQLGetData(col=%d,type=%d)=%d (retlen=%ld)\n", colno, col_type, rc, len); if (rc == SQL_NO_DATA) { /* finished - SQLGetData returns this when you call it after it has returned all of the data */ return 0; } else if (!SQL_SUCCEEDED(rc)) { dbd_error(sth, rc, "odbc_st_lob_read/SQLGetData"); return -1; } else if (rc 
== SQL_SUCCESS_WITH_INFO) { /* we are assuming this is 01004 - string data right truncation unless len == SQL_NO_TOTAL */ if (len == SQL_NO_TOTAL) { dbd_error(sth, rc, "Driver did not return the lob length - SQL_NO_TOTAL)"); return -1; } retlen = length; if (col_type == SQL_C_CHAR) { retlen--; /* NUL chr at end */ } } else if (rc == SQL_SUCCESS) { if (len == SQL_NULL_DATA) { return 0; } retlen = len; } #ifdef WITH_UNICODE if (col_type == SQL_C_WCHAR) { char *c1; c1 = PVallocW((SQLWCHAR *)buf); buf = SvGROW(data, strlen(c1) + 1); retlen = retlen / sizeof(SQLWCHAR); strcpy(buf, c1); PVfreeW(c1); # ifdef sv_utf8_decode sv_utf8_decode(data); # else SvUTF8_on(data); # endif } # endif return retlen; } /************************************************************************/ /* */ /* odbc_col_attributes */ /* =================== */ /* */ /************************************************************************/ SV *odbc_col_attributes(SV *sth, int colno, int desctype) { dTHX; D_imp_sth(sth); RETCODE rc; SV *retsv = NULL; unsigned char str_attr[512]; SWORD str_attr_len = 0; SQLLEN num_attr = 0; memset(str_attr, '\0', sizeof(str_attr)); if ( !DBIc_ACTIVE(imp_sth) ) { dbd_error(sth, DBDODBC_INTERNAL_ERROR, "no statement executing"); return Nullsv; } /* * At least on Win95, calling this with colno==0 would "core" dump/GPF. * protect, even though it's valid for some values of desctype * (e.g. SQL_COLUMN_COUNT, since it doesn't depend on the colcount) */ if (colno == 0) { dbd_error(sth, DBDODBC_INTERNAL_ERROR, "cannot obtain SQLColAttributes for column 0"); return Nullsv; } /* * workaround a problem in unixODBC 2.2.11 which can write off the * end of the str_attr buffer when built with unicode - lie about * buffer size - we've got more than we admit to. */ rc = SQLColAttributes(imp_sth->hstmt, (SQLUSMALLINT)colno, (SQLUSMALLINT)desctype, str_attr, sizeof(str_attr)/2, &str_attr_len, &num_attr); if (!SQL_SUCCEEDED(rc)) { dbd_error(sth, rc, "odbc_col_attributes/SQLColAttributes"); return Nullsv; } else if (SQL_SUCCESS_WITH_INFO == rc) { warn("SQLColAttributes has truncated returned data"); } if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 3)) { PerlIO_printf( DBIc_LOGPIO(imp_sth), " SQLColAttributes: colno=%d, desctype=%d, str_attr=%s, " "str_attr_len=%d, num_attr=%ld", colno, desctype, str_attr, str_attr_len, (long)num_attr); } switch (desctype) { case SQL_COLUMN_AUTO_INCREMENT: case SQL_COLUMN_CASE_SENSITIVE: case SQL_COLUMN_COUNT: case SQL_COLUMN_DISPLAY_SIZE: case SQL_COLUMN_LENGTH: case SQL_COLUMN_MONEY: case SQL_COLUMN_NULLABLE: case SQL_COLUMN_PRECISION: case SQL_COLUMN_SCALE: case SQL_COLUMN_SEARCHABLE: case SQL_COLUMN_TYPE: case SQL_COLUMN_UNSIGNED: case SQL_COLUMN_UPDATABLE: { retsv = newSViv(num_attr); break; } case SQL_COLUMN_LABEL: case SQL_COLUMN_NAME: case SQL_COLUMN_OWNER_NAME: case SQL_COLUMN_QUALIFIER_NAME: case SQL_COLUMN_TABLE_NAME: case SQL_COLUMN_TYPE_NAME: { /* * NOTE: in unixODBC 2.2.11, if you called SQLDriverConnectW and * then called SQLColAttributes for a string type it would often * return half the number of characters it had written to * str_attr in str_attr_len. */ retsv = newSVpv(str_attr, strlen(str_attr)); break; } default: { dbd_error(sth, DBDODBC_INTERNAL_ERROR, "driver-specific column attributes not supported"); return Nullsv; break; } } #ifdef OLD_STUFF_THAT_SEEMS_FLAWED /* * sigh...Oracle's ODBC driver version 8.0.4 resets str_attr_len to 0, when * putting a value in num_attr. This is a change! * * double sigh. 
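 * (For reference, the switch above now avoids this guessing altogether:
 * desctypes known to be numeric, such as SQL_COLUMN_NULLABLE or
 * SQL_COLUMN_LENGTH, are taken from num_attr and the known string
 * desctypes, such as SQL_COLUMN_NAME, from str_attr.)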
SQL Server (and MySql under Unix) set str_attr_len * but use num_attr, not str_attr. This change may be problematic * for other drivers. (the additional || num_attr != -2...) */ if (str_attr_len == -2 || str_attr_len == 0 || num_attr != -2) retsv = newSViv(num_attr); else if (str_attr_len != 2 && str_attr_len != 4) retsv = newSVpv(str_attr, 0); else if (str_attr[str_attr_len] == '\0') /* fix for DBD::ODBC 0.39 thanks to Nicolas DeRico */ retsv = newSVpv(str_attr, 0); else { if (str_attr_len == 2) retsv = newSViv(*(short *)str_attr); else retsv = newSViv(*(int *)str_attr); } #endif return sv_2mortal(retsv); } #ifdef OLD_ONE_BEFORE_SCALARS int odbc_db_columns(dbh, sth, catalog, schema, table, column) SV *dbh; SV *sth; char *catalog; char *schema; char *table; char *column; { dTHX; D_imp_dbh(dbh); D_imp_sth(sth); RETCODE rc; int dbh_active; size_t max_stmt_len; imp_sth->henv = imp_dbh->henv; /* needed for dbd_error */ imp_sth->hdbc = imp_dbh->hdbc; imp_sth->done_desc = 0; if ((dbh_active = check_connection_active(aTHX_ dbh)) == 0) return 0; rc = SQLAllocHandle(SQL_HANDLE_STMT, imp_dbh->hdbc, &imp_sth->hstmt); if (rc != SQL_SUCCESS) { dbd_error(sth, rc, "odbc_db_columns/SQLAllocHandle(stmt)"); return 0; } /* just for sanity, later. Any internals that may rely on this (including */ /* debugging) will have valid data */ max_stmt_len = strlen(cSqlColumns)+ strlen(XXSAFECHAR(catalog))+ strlen(XXSAFECHAR(schema))+ strlen(XXSAFECHAR(table))+ strlen(XXSAFECHAR(column))+1; imp_sth->statement = (char *)safemalloc(max_stmt_len); my_snprintf(imp_sth->statement, max_stmt_len, cSqlColumns, XXSAFECHAR(catalog), XXSAFECHAR(schema), XXSAFECHAR(table), XXSAFECHAR(column)); rc = SQLColumns(imp_sth->hstmt, (catalog && *catalog) ? catalog : 0, SQL_NTS, (schema && *schema) ? schema : 0, SQL_NTS, (table && *table) ? table : 0, SQL_NTS, (column && *column) ? column : 0, SQL_NTS); if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 3)) PerlIO_printf( DBIc_LOGPIO(imp_dbh), " SQLColumns call: cat = %s, schema = %s, table = %s, " "column = %s\n", XXSAFECHAR(catalog), XXSAFECHAR(schema), XXSAFECHAR(table), XXSAFECHAR(column)); dbd_error(sth, rc, "odbc_columns/SQLColumns"); if (!SQL_SUCCEEDED(rc)) { SQLFreeHandle(SQL_HANDLE_STMT,imp_sth->hstmt); imp_sth->hstmt = SQL_NULL_HSTMT; return 0; } return build_results(aTHX_ sth, imp_sth, dbh, imp_dbh, rc); } #endif /* OLD_ONE_BEFORE_SCALARS */ int odbc_db_columns( SV *dbh, SV *sth, SV *catalog, SV *schema, SV *table, SV *column) { dTHX; D_imp_dbh(dbh); D_imp_sth(sth); RETCODE rc; int dbh_active; size_t max_stmt_len; char *acatalog = NULL; char *aschema = NULL; char *atable = NULL; char *acolumn = NULL; imp_sth->henv = imp_dbh->henv; /* needed for dbd_error */ imp_sth->hdbc = imp_dbh->hdbc; imp_sth->done_desc = 0; if ((dbh_active = check_connection_active(aTHX_ dbh)) == 0) return 0; rc = SQLAllocHandle(SQL_HANDLE_STMT, imp_dbh->hdbc, &imp_sth->hstmt); if (rc != SQL_SUCCESS) { dbd_error(sth, rc, "odbc_db_columns/SQLAllocHandle(stmt)"); return 0; } if (SvOK(catalog)) acatalog = SvPV_nolen(catalog); if (SvOK(schema)) aschema = SvPV_nolen(schema); if (SvOK(table)) atable = SvPV_nolen(table); if (SvOK(column)) acolumn = SvPV_nolen(column); /* just for sanity, later. 
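 * This function sits behind DBI's column_info() method; from Perl the
 * call looks roughly like this (schema/table names are placeholders):
 *
 *   my $sth  = $dbh->column_info(undef, 'dbo', 'mytable', '%');
 *   my $cols = $sth->fetchall_arrayref({});
 *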
Any internals that may rely on this (including */ /* debugging) will have valid data */ max_stmt_len = strlen(cSqlColumns)+ strlen(XXSAFECHAR(acatalog))+ strlen(XXSAFECHAR(aschema))+ strlen(XXSAFECHAR(atable))+ strlen(XXSAFECHAR(acolumn))+1; imp_sth->statement = (char *)safemalloc(max_stmt_len); my_snprintf(imp_sth->statement, max_stmt_len, cSqlColumns, XXSAFECHAR(acatalog), XXSAFECHAR(aschema), XXSAFECHAR(atable), XXSAFECHAR(acolumn)); #ifdef WITH_UNICODE { SQLWCHAR *wcatalog = NULL; SQLWCHAR *wschema = NULL; SQLWCHAR *wtable = NULL; SQLWCHAR *wcolumn = NULL; STRLEN wlen; SV *copy; if (SvOK(catalog)) { copy = sv_mortalcopy(catalog); SV_toWCHAR(aTHX_ copy); wcatalog = (SQLWCHAR *)SvPV(copy, wlen); } if (SvOK(schema)) { copy = sv_mortalcopy(schema); SV_toWCHAR(aTHX_ copy); wschema = (SQLWCHAR *)SvPV(copy, wlen); } if (SvOK(table)) { copy = sv_mortalcopy(table); SV_toWCHAR(aTHX_ copy); wtable = (SQLWCHAR *)SvPV(copy, wlen); } if (SvOK(column)) { copy = sv_mortalcopy(column); SV_toWCHAR(aTHX_ copy); wcolumn = (SQLWCHAR *)SvPV(copy, wlen); } rc = SQLColumnsW(imp_sth->hstmt, (wcatalog && *wcatalog) ? wcatalog : NULL, SQL_NTS, (wschema && *wschema) ? wschema : NULL, SQL_NTS, (wtable && *wtable) ? wtable : NULL, SQL_NTS, (wcolumn && *wcolumn) ? wcolumn : 0, SQL_NTS ); } #else { rc = SQLColumns(imp_sth->hstmt, (acatalog && *acatalog) ? acatalog : 0, SQL_NTS, (aschema && *aschema) ? aschema : 0, SQL_NTS, (atable && *atable) ? atable : 0, SQL_NTS, (acolumn && *acolumn) ? acolumn : 0, SQL_NTS); } #endif /* WITH_UNICODE */ if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 3)) PerlIO_printf( DBIc_LOGPIO(imp_dbh), " SQLColumns call: cat = %s, schema = %s, table = %s, " "column = %s\n", XXSAFECHAR(acatalog), XXSAFECHAR(aschema), XXSAFECHAR(atable), XXSAFECHAR(acolumn)); dbd_error(sth, rc, "odbc_columns/SQLColumns"); if (!SQL_SUCCEEDED(rc)) { SQLFreeHandle(SQL_HANDLE_STMT,imp_sth->hstmt); imp_sth->hstmt = SQL_NULL_HSTMT; return 0; } return build_results(aTHX_ sth, imp_sth, dbh, imp_dbh, rc); } /* * AllODBCErrors * ============= * * Given ODBC environment, connection and statement handles (any of which may * be null) this function will retrieve all ODBC errors recorded and * optionally (if output is not 0) output them to the specified log handle. 
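 * Note these diagnostics only go to the trace log; errors reported to the
 * application go through dbd_error()/DBIh_SET_ERR_CHAR and surface in the
 * usual DBI way, e.g. (illustrative, the table name is a placeholder):
 *
 *   $dbh->{RaiseError} = 0;
 *   $dbh->do('select 1 from no_such_table')
 *       or warn join(' ', $dbh->err, $dbh->state, $dbh->errstr);
 *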
* */ static void AllODBCErrors( HENV henv, HDBC hdbc, HSTMT hstmt, int output, PerlIO *logfp) { SQLRETURN rc; do { UCHAR sqlstate[SQL_SQLSTATE_SIZE+1]; /* ErrorMsg must not be greater than SQL_MAX_MESSAGE_LENGTH */ UCHAR ErrorMsg[SQL_MAX_MESSAGE_LENGTH]; SWORD ErrorMsgLen; SDWORD NativeError; /* TBD: 3.0 update */ rc=SQLError(henv, hdbc, hstmt, sqlstate, &NativeError, ErrorMsg, sizeof(ErrorMsg)-1, &ErrorMsgLen); if (output && SQL_SUCCEEDED(rc)) PerlIO_printf(logfp, "%s %s\n", sqlstate, ErrorMsg); } while(SQL_SUCCEEDED(rc)); return; } /************************************************************************/ /* */ /* check_connection_active */ /* ======================= */ /* */ /************************************************************************/ static int check_connection_active(pTHX_ SV *h) { D_imp_xxh(h); struct imp_dbh_st *imp_dbh = NULL; struct imp_sth_st *imp_sth = NULL; switch(DBIc_TYPE(imp_xxh)) { case DBIt_ST: imp_sth = (struct imp_sth_st *)imp_xxh; imp_dbh = (struct imp_dbh_st *)(DBIc_PARENT_COM(imp_sth)); break; case DBIt_DB: imp_dbh = (struct imp_dbh_st *)imp_xxh; break; default: croak("panic: check_connection_active bad handle type"); } if (!DBIc_ACTIVE(imp_dbh)) { DBIh_SET_ERR_CHAR( h, imp_xxh, Nullch, 1, "Cannot allocate statement when disconnected from the database", "08003", Nullch); return 0; } return 1; } /************************************************************************/ /* */ /* set_odbc_version */ /* ================ */ /* */ /* Set the ODBC version we require. This defaults to ODBC 3 but if */ /* attr contains the odbc_version atttribute this overrides it. If we */ /* fail for any reason the env handle is freed, the error reported and */ /* 0 is returned. If all ok, 1 is returned. */ /* */ /************************************************************************/ static int set_odbc_version( pTHX_ SV *dbh, imp_dbh_t *imp_dbh, SV* attr) { D_imp_drh_from_dbh; SV **svp; UV odbc_version = 0; SQLRETURN rc; DBD_ATTRIB_GET_IV( attr, "odbc_version", 12, svp, odbc_version); if (svp && odbc_version) { rc = SQLSetEnvAttr(imp_drh->henv, SQL_ATTR_ODBC_VERSION, (SQLPOINTER)odbc_version, SQL_IS_INTEGER); } else { /* make sure we request a 3.0 version */ rc = SQLSetEnvAttr(imp_drh->henv, SQL_ATTR_ODBC_VERSION, (SQLPOINTER)SQL_OV_ODBC3, SQL_IS_INTEGER); } if (!SQL_SUCCEEDED(rc)) { dbd_error2( dbh, rc, "db_login/SQLSetEnvAttr", imp_drh->henv, 0, 0); if (imp_drh->connects == 0) { SQLFreeHandle(SQL_HANDLE_ENV, imp_drh->henv); imp_drh->henv = SQL_NULL_HENV; } return 0; } return 1; } /* * post_connect * ========== * * Operations to perform immediately after we have connected. 
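 * Several of these steps read attributes passed to connect(); an
 * illustrative sketch (the DSN is a placeholder and 2 is just
 * SQL_CURSOR_DYNAMIC used as an example value):
 *
 *   my $dbh = DBI->connect('dbi:ODBC:DSN=mydsn', $user, $pass,
 *                          { RaiseError         => 1,
 *                            odbc_cursortype    => 2,
 *                            odbc_query_timeout => 60 });
 *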
* * NOTE: prior to DBI subversion version 11605 (fixed post 1.607) * DBD_ATTRIB_DELETE segfaulted so instead of calling: * DBD_ATTRIB_DELETE(attr, "odbc_cursortype", * strlen("odbc_cursortype")); * we do the following: * hv_delete((HV*)SvRV(attr), "odbc_cursortype", * strlen("odbc_cursortype"), G_DISCARD); */ static int post_connect( pTHX_ SV *dbh, imp_dbh_t *imp_dbh, SV *attr) { D_imp_drh_from_dbh; SQLRETURN rc; SWORD dbvlen; UWORD supported; /* default this now before we may change it below */ imp_dbh->switch_to_longvarchar = ODBC_SWITCH_TO_LONGVARCHAR; if (DBIc_TRACE(imp_dbh, CONNECTION_TRACING, 0, 0)) TRACE0(imp_dbh, "Turning autocommit on\n"); /* DBI spec requires AutoCommit on */ rc = SQLSetConnectAttr(imp_dbh->hdbc, SQL_AUTOCOMMIT, (SQLPOINTER)SQL_AUTOCOMMIT_ON, 0); if (!SQL_SUCCEEDED(rc)) { dbd_error(dbh, rc, "post_connect/SQLSetConnectAttr(SQL_AUTOCOMMIT)"); SQLFreeHandle(SQL_HANDLE_DBC, imp_dbh->hdbc); if (imp_drh->connects == 0) { SQLFreeHandle(SQL_HANDLE_ENV, imp_drh->henv); imp_drh->henv = SQL_NULL_HENV; imp_dbh->henv = SQL_NULL_HENV; /* needed for dbd_error */ } return 0; } DBIc_set(imp_dbh,DBIcf_AutoCommit, 1); /* get the ODBC compatibility level for this driver */ rc = SQLGetInfo(imp_dbh->hdbc, SQL_DRIVER_ODBC_VER, &imp_dbh->odbc_ver, (SWORD)sizeof(imp_dbh->odbc_ver), &dbvlen); if (!SQL_SUCCEEDED(rc)) { dbd_error(dbh, rc, "post_connect/SQLGetInfo(DRIVER_ODBC_VER)"); strcpy(imp_dbh->odbc_ver, "01.00"); } if (DBIc_TRACE(imp_dbh, CONNECTION_TRACING, 0, 0)) TRACE1(imp_dbh, "DRIVER_ODBC_VER = %s\n", imp_dbh->odbc_ver); /* get ODBC driver name and version */ rc = SQLGetInfo(imp_dbh->hdbc, SQL_DRIVER_NAME, &imp_dbh->odbc_driver_name, (SQLSMALLINT)sizeof(imp_dbh->odbc_driver_name), &dbvlen); if (!SQL_SUCCEEDED(rc)) { dbd_error(dbh, rc, "post_connect/SQLGetInfo(DRIVER_NAME)"); strcpy(imp_dbh->odbc_driver_name, "unknown"); imp_dbh->driver_type = DT_DONT_CARE; } else { if (strcmp(imp_dbh->odbc_driver_name, "SQLSRV32.DLL") == 0) { imp_dbh->driver_type = DT_SQL_SERVER; } else if ((strcmp(imp_dbh->odbc_driver_name, "sqlncli10.dll") == 0) || (strcmp(imp_dbh->odbc_driver_name, "SQLNCLI.DLL") == 0) || (memcmp(imp_dbh->odbc_driver_name, "libmsodbcsql", 13) == 0)) { imp_dbh->driver_type = DT_SQL_SERVER_NATIVE_CLIENT; } else if (strcmp(imp_dbh->odbc_driver_name, "odbcjt32.dll") == 0) { imp_dbh->driver_type = DT_MS_ACCESS_JET; imp_dbh->switch_to_longvarchar = 255; } else if (strcmp(imp_dbh->odbc_driver_name, "ACEODBC.DLL") == 0) { imp_dbh->driver_type = DT_MS_ACCESS_ACE; imp_dbh->switch_to_longvarchar = 255; } else if (strcmp(imp_dbh->odbc_driver_name, "esoobclient") == 0) { imp_dbh->driver_type = DT_ES_OOB; } else if (strcmp(imp_dbh->odbc_driver_name, "OdbcFb") == 0) { imp_dbh->driver_type = DT_FIREBIRD; } else if (memcmp(imp_dbh->odbc_driver_name, "libtdsodbc", 10) == 0) { imp_dbh->driver_type = DT_FREETDS; } else { imp_dbh->driver_type = DT_DONT_CARE; } } if (DBIc_TRACE(imp_dbh, CONNECTION_TRACING, 0, 0)) TRACE2(imp_dbh, "DRIVER_NAME = %s, type=%d\n", imp_dbh->odbc_driver_name, imp_dbh->driver_type); rc = SQLGetInfo(imp_dbh->hdbc, SQL_DRIVER_VER, &imp_dbh->odbc_driver_version, (SQLSMALLINT)sizeof(imp_dbh->odbc_driver_version), &dbvlen); if (!SQL_SUCCEEDED(rc)) { dbd_error(dbh, rc, "post_connect/SQLGetInfo(DRIVER_VERSION)"); strcpy(imp_dbh->odbc_driver_name, "unknown"); } if (DBIc_TRACE(imp_dbh, CONNECTION_TRACING, 0, 0)) TRACE1(imp_dbh, "DRIVER_VERSION = %s\n", imp_dbh->odbc_driver_version); rc = SQLGetInfo(imp_dbh->hdbc, SQL_DBMS_NAME, &imp_dbh->odbc_dbms_name, 
(SQLSMALLINT)sizeof(imp_dbh->odbc_dbms_name), &dbvlen); if (!SQL_SUCCEEDED(rc)) { dbd_error(dbh, rc, "post_connect/SQLGetInfo(SQL_DBMS_NAME)"); strcpy(imp_dbh->odbc_dbms_name, "unknown"); } rc = SQLGetInfo(imp_dbh->hdbc, SQL_DBMS_VER, &imp_dbh->odbc_dbms_version, (SQLSMALLINT)sizeof(imp_dbh->odbc_dbms_version), &dbvlen); if (!SQL_SUCCEEDED(rc)) { dbd_error(dbh, rc, "post_connect/SQLGetInfo(SQL_DBMS_VER)"); strcpy(imp_dbh->odbc_dbms_version, "unknown"); } /* find maximum column name length */ rc = SQLGetInfo(imp_dbh->hdbc, SQL_MAX_COLUMN_NAME_LEN, &imp_dbh->max_column_name_len, (SWORD) sizeof(imp_dbh->max_column_name_len), &dbvlen); if (!SQL_SUCCEEDED(rc)) { dbd_error(dbh, rc, "post_connect/SQLGetInfo(MAX_COLUMN_NAME_LEN)"); imp_dbh->max_column_name_len = 256; } else if (imp_dbh->max_column_name_len == 0) { imp_dbh->max_column_name_len = 256; } else { if (DBIc_TRACE(imp_dbh, CONNECTION_TRACING, 0, 0)) TRACE1(imp_dbh, "MAX_COLUMN_NAME_LEN = %d\n", imp_dbh->max_column_name_len); } /* find catalog usage */ { char yesno[10]; rc = SQLGetInfo(imp_dbh->hdbc, SQL_CATALOG_NAME, yesno, (SQLSMALLINT) sizeof(yesno), &dbvlen); if (!SQL_SUCCEEDED(rc)) { dbd_error(dbh, rc, "post_connect/SQLGetInfo(SQL_CATALOG_NAME)"); imp_dbh->catalogs_supported = 0; } else if (yesno[0] == 'Y') { imp_dbh->catalogs_supported = 1; } else { imp_dbh->catalogs_supported = 0; } if (DBIc_TRACE(imp_dbh, CONNECTION_TRACING, 0, 0)) TRACE1(imp_dbh, "SQL_CATALOG_NAME = %d\n", imp_dbh->catalogs_supported); } /* find schema usage */ { rc = SQLGetInfo(imp_dbh->hdbc, SQL_SCHEMA_USAGE, &imp_dbh->schema_usage, (SQLSMALLINT) sizeof(imp_dbh->schema_usage), &dbvlen); if (!SQL_SUCCEEDED(rc)) { dbd_error(dbh, rc, "post_connect/SQLGetInfo(SQL_SCHEMA_USAGE)"); imp_dbh->schema_usage = 0; } if (DBIc_TRACE(imp_dbh, CONNECTION_TRACING, 0, 0)) TRACE1(imp_dbh, "SQL_SCHEMA_USAGE = %lu\n", (unsigned long)imp_dbh->schema_usage); } #ifdef WITH_UNICODE imp_dbh->max_column_name_len = imp_dbh->max_column_name_len * sizeof(SQLWCHAR) + 2; #endif if (imp_dbh->max_column_name_len > 512) { imp_dbh->max_column_name_len = 512; DBIh_SET_ERR_CHAR( dbh, (imp_xxh_t*)imp_drh, "0", 1, "Max column name length pegged at 512", Nullch, Nullch); } /* default ignoring named parameters and array operations to false */ imp_dbh->odbc_ignore_named_placeholders = 0; imp_dbh->odbc_array_operations = 0; #ifdef DEFAULT_IS_OFF_NOW_SO_THIS_IS_NOT_REQUIRED /* Disable array operations by default for some drivers as no version I've ever seen works and it annoys the dbix-class guys */ if (imp_dbh->driver_type == DT_FREETDS || imp_dbh->driver_type == DT_MS_ACCESS_JET || imp_dbh->driver_type == DT_MS_ACCESS_ACE) { imp_dbh->odbc_array_operations = 0; } #endif #ifdef WITH_UNICODE imp_dbh->odbc_has_unicode = 1; #else imp_dbh->odbc_has_unicode = 0; #endif if (DBIc_TRACE(imp_dbh, CONNECTION_TRACING, 0, 0)) TRACE1(imp_dbh, "DBD::ODBC is unicode built : %s\n", imp_dbh->odbc_has_unicode ? 
"YES" : "NO"); imp_dbh->odbc_default_bind_type = 0; imp_dbh->odbc_force_bind_type = 0; #ifdef SQL_ROWSET_SIZE_DEFAULT imp_dbh->rowset_size = SQL_ROWSET_SIZE_DEFAULT; #else /* it should be 1 anyway so above should be redundant but included here partly to remind me what it is */ imp_dbh->rowset_size = 1; #endif /* flag to see if SQLDescribeParam is supported */ imp_dbh->odbc_sqldescribeparam_supported = -1; /* flag to see if SQLDescribeParam is supported */ imp_dbh->odbc_sqlmoreresults_supported = -1; imp_dbh->odbc_defer_binding = 0; imp_dbh->odbc_force_rebind = 0; /* default value for query timeout is -1 which means do not set the query timeout at all. */ imp_dbh->odbc_query_timeout = -1; imp_dbh->odbc_putdata_start = 32768; imp_dbh->odbc_batch_size = 10; imp_dbh->read_only = -1; /* show not set yet */ /*printf("odbc_batch_size defaulted to %d\n", imp_dbh->odbc_batch_size);*/ imp_dbh->odbc_column_display_size = 2001; imp_dbh->odbc_utf8_on = 0; imp_dbh->odbc_exec_direct = 0; /* default to not having SQLExecDirect used */ imp_dbh->odbc_describe_parameters = 1; imp_dbh->RowCacheSize = 1; /* default value for now */ #ifdef WE_DONT_DO_THIS_ANYMORE if (!strcmp(imp_dbh->odbc_dbms_name, "Microsoft SQL Server")) { if (DBIc_TRACE(imp_dbh, CONNECTION_TRACING, 0, 0)) TRACE0(imp_dbh, "Deferring Binding\n"); imp_dbh->odbc_defer_binding = 1; } #endif /* check to see if SQLMoreResults is supported */ rc = SQLGetFunctions(imp_dbh->hdbc, SQL_API_SQLMORERESULTS, &supported); if (SQL_SUCCEEDED(rc)) { if (DBIc_TRACE(imp_dbh, CONNECTION_TRACING, 0, 0)) TRACE1(imp_dbh, "SQLMoreResults supported: %d\n", supported); imp_dbh->odbc_sqlmoreresults_supported = supported ? 1 : 0; } else { imp_dbh->odbc_sqlmoreresults_supported = 0; if (DBIc_TRACE(imp_dbh, CONNECTION_TRACING, 0, 0)) TRACE0(imp_dbh, " !!SQLGetFunctions(SQL_API_SQLMORERESULTS) failed:\n"); AllODBCErrors(imp_dbh->henv, imp_dbh->hdbc, 0, DBIc_TRACE(imp_dbh, DBD_TRACING, 0, 3), DBIc_LOGPIO(imp_dbh)); } /* call only once per connection / DBH -- may want to do * this during the connect to avoid potential threading * issues */ /* check to see if SQLDescribeParam is supported */ rc = SQLGetFunctions(imp_dbh->hdbc, SQL_API_SQLDESCRIBEPARAM, &supported); if (SQL_SUCCEEDED(rc)) { if (DBIc_TRACE(imp_dbh, CONNECTION_TRACING, 0, 0)) TRACE1(imp_dbh, "SQLDescribeParam supported: %d\n", supported); imp_dbh->odbc_sqldescribeparam_supported = supported ? 
1 : 0; } else { imp_dbh->odbc_sqldescribeparam_supported = 0; if (DBIc_TRACE(imp_dbh, CONNECTION_TRACING, 0, 0)) TRACE0(imp_dbh, " !!SQLGetFunctions(SQL_API_SQLDESCRIBEPARAM) failed:\n"); AllODBCErrors(imp_dbh->henv, imp_dbh->hdbc, 0, DBIc_TRACE(imp_dbh, DBD_TRACING, 0, 3), DBIc_LOGPIO(imp_dbh)); } /* odbc_cursortype */ { SV **svp; UV odbc_cursortype = 0; DBD_ATTRIB_GET_IV(attr, "odbc_cursortype", 15, svp, odbc_cursortype); if (svp && odbc_cursortype) { if (DBIc_TRACE(imp_dbh, CONNECTION_TRACING, 0, 0)) TRACE1(imp_dbh, " Setting cursor type to: %"UVuf"\n", odbc_cursortype); /* delete odbc_cursortype so we don't see it again via STORE */ (void)hv_delete((HV*)SvRV(attr), "odbc_cursortype", strlen("odbc_cursortype"), G_DISCARD); rc = SQLSetConnectAttr(imp_dbh->hdbc,(SQLINTEGER)SQL_CURSOR_TYPE, (SQLPOINTER)odbc_cursortype, (SQLINTEGER)SQL_IS_INTEGER); if (!SQL_SUCCEEDED(rc) && (DBIc_TRACE(imp_dbh, CONNECTION_TRACING, 0, 0))) TRACE1(imp_dbh, " !!Failed to set SQL_CURSORTYPE to %d\n", (int)odbc_cursortype); } } /* odbc_query_timeout */ { SV **svp; UV odbc_timeout = 0; DBD_ATTRIB_GET_IV( attr, "odbc_query_timeout", strlen("odbc_query_timeout"), svp, odbc_timeout); if (svp && odbc_timeout) { imp_dbh->odbc_query_timeout = odbc_timeout; if (DBIc_TRACE(imp_dbh, CONNECTION_TRACING, 0, 0)) TRACE1(imp_dbh, " Setting DBH query timeout to %d\n", (int)odbc_timeout); /* delete odbc_cursortype so we don't see it again via STORE */ (void)hv_delete((HV*)SvRV(attr), "odbc_query_timeout", strlen("odbc_query_timeout"), G_DISCARD); } } /* odbc_putdata_start */ { SV **svp; IV putdata_start_value; DBD_ATTRIB_GET_IV( attr, "odbc_putdata_start", strlen("odbc_putdata_start"), svp, putdata_start_value); if (svp) { imp_dbh->odbc_putdata_start = putdata_start_value; if (DBIc_TRACE(imp_dbh, CONNECTION_TRACING, 0, 0)) TRACE1(imp_dbh, " Setting DBH putdata_start to %d\n", (int)putdata_start_value); /* delete odbc_putdata_start so we don't see it again via STORE */ (void)hv_delete((HV*)SvRV(attr), "odbc_putdata_start", strlen("odbc_putdata_start"), G_DISCARD); } } /* odbc_column_display_size */ { SV **svp; IV column_display_size_value; DBD_ATTRIB_GET_IV( attr, "odbc_column_display_size", strlen("odbc_column_display_size"), svp, column_display_size_value); if (svp) { imp_dbh->odbc_column_display_size = column_display_size_value; if (DBIc_TRACE(imp_dbh, CONNECTION_TRACING, 0, 0)) TRACE1(imp_dbh, " Setting DBH default column display size to %d\n", (int)column_display_size_value); /* delete odbc_column_display_size so we don't see it again via STORE */ (void)hv_delete((HV*)SvRV(attr), "odbc_column_display_size", strlen("odbc_column_display_size"), G_DISCARD); } } /* odbc_utf8_on */ { SV **svp; IV utf8_on_value; DBD_ATTRIB_GET_IV( attr, "odbc_utf8_on", strlen("odbc_utf8_on"), svp, utf8_on_value); if (svp) { imp_dbh->odbc_utf8_on = utf8_on_value; if (DBIc_TRACE(imp_dbh, CONNECTION_TRACING, 0, 0)) TRACE1(imp_dbh, " Setting UTF8_ON to %d\n", (int)utf8_on_value); /* delete odbc_utf8_on so we don't see it again via STORE */ (void)hv_delete((HV*)SvRV(attr), "odbc_utf8_on", strlen("odbc_utf8_on"), G_DISCARD); } } return 1; } /* * Called when we don't know what to bind a parameter as. 
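 * A caller can always sidestep the guess by giving bind_param an explicit
 * type, e.g. (illustrative):
 *
 *   use DBI qw(:sql_types);
 *   $sth->bind_param(1, $big_value, SQL_LONGVARCHAR);
 *
 * but when no type is supplied we end up here because the driver could
 * not tell us what the parameter is.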
This can happen for all sorts * of reasons like: * * o SQLDescribeParam is not supported * o odbc_describe_parameters is set to 0 (in other words telling us not to describe) * o SQLDescribeParam was called and failed * o SQLDescribeParam was called but returned an unrecognised parameter type * * If the data to bind is unicode (SvUTF8 is true) it is bound as SQL_WCHAR * or SQL_WLONGVARCHAR depending on its size. Otherwise it is bound as * SQL_VARCHAR/SQL_LONGVARCHAR. */ static SQLSMALLINT default_parameter_type( char *why, imp_sth_t *imp_sth, phs_t *phs) { SQLSMALLINT sql_type; struct imp_dbh_st *imp_dbh = NULL; imp_dbh = (struct imp_dbh_st *)(DBIc_PARENT_COM(imp_sth)); if (imp_sth->odbc_default_bind_type != 0) { sql_type = imp_sth->odbc_default_bind_type; } else { /* MS Access can return an invalid precision error in the 12blob test unless the large value is bound as an SQL_LONGVARCHAR or SQL_WLONGVARCHAR. Who knows what large is, but for now it is 4000 */ /* Changed to 2000 for the varchar max switch as in a unicode build we can change a string of 'x' x 2001 into 4002 wide chrs and SQL Server will also return invalid precision in this case on a varchar(4000). Of course, being SQL Server, it also has this problem with the newer varchar(8000)! */ if (!SvOK(phs->sv)) { sql_type = ODBC_BACKUP_BIND_TYPE_VALUE; if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 3)) TRACE2(imp_sth, "%s, sv is not OK, defaulting to %d\n", why, sql_type); } else if (SvCUR(phs->sv) > imp_dbh->switch_to_longvarchar) { #if defined(WITH_UNICODE) if (SvUTF8(phs->sv)) sql_type = SQL_WLONGVARCHAR; else #endif sql_type = SQL_LONGVARCHAR; if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 3)) TRACE3(imp_sth, "%s, sv=%"UVuf" bytes, defaulting to %d\n", why, (UV)SvCUR(phs->sv), sql_type); } else { #if defined(WITH_UNICODE) if (SvUTF8(phs->sv)) sql_type = SQL_WVARCHAR; else #endif sql_type = SQL_VARCHAR; /*return ODBC_BACKUP_BIND_TYPE_VALUE;*/ if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 3)) TRACE3(imp_sth, "%s, sv=%"UVuf" bytes, defaulting to %d\n", why, (UV)SvCUR(phs->sv), sql_type); } } return sql_type; } #ifdef WIN32 static HWND GetConsoleHwnd(void) { #define MY_BUFSIZE 1024 /* Buffer size for console window titles. */ HWND hwndFound; /* This is what is returned to the caller. */ char pszNewWindowTitle[MY_BUFSIZE]; /* Contains fabricated WindowTitle. */ char pszOldWindowTitle[MY_BUFSIZE]; /* Contains original WindowTitle */ /* Fetch current window title. */ GetConsoleTitle(pszOldWindowTitle, MY_BUFSIZE); /* Format a "unique" NewWindowTitle. */ wsprintf(pszNewWindowTitle,"%d/%d", GetTickCount(), GetCurrentProcessId()); /* Change current window title. */ SetConsoleTitle(pszNewWindowTitle); /* Ensure window title has been updated. */ Sleep(40); /* Look for NewWindowTitle. */ hwndFound=FindWindow(NULL, pszNewWindowTitle); /* Restore original window title. */ SetConsoleTitle(pszOldWindowTitle); return(hwndFound); } #endif /* WIN32 */ /* * new odbc_rows statement method to workaround RT 81911 in DBI * Just return the last RowCount value suitably mangled like execute does * but without casting to int problem. */ IV odbc_st_rowcount( SV *sth) { dTHX; D_imp_sth(sth); /* SQLLEN rows; SQLRETURN rc;*/ return imp_sth->RowCount; /* rc = SQLRowCount(imp_sth->hstmt, &rows); if (!SQL_SUCCEEDED(rc)) { dbd_error(sth, rc, "odbc_st_rowcount"); return -1; } return rows;*/ } /* TO_DO: * bind_param can be called with no target parameter but to set the parameter type * and it is supposed to be sticky - it is not here. 
* we don't free up memory allocated * I've no idea what will happen with lobs - probably won't work or will be set * as hex strings (depends on driver mapping of SQL_CHAR to binary columns) */ IV odbc_st_execute_for_fetch( SV *sth, SV *tuples, /* the actual data to bind */ IV count, /* count of rows */ SV *tuple_status) /* returned tuple status */ { dTHX; D_imp_sth(sth); D_imp_dbh_from_sth; SQLRETURN rc; AV *tuples_av, *tuples_status_av; /* array ptrs for tuples and tuple_status */ unsigned int p; /* for loop through parameters */ unsigned long *maxlen; /* array to store max size of each param */ int n_params; /* number of parameters */ unsigned int row; int err_seen = 0; /* some row errored */ int remalloc_svs = 0; /* remalloc the phs sv arrays */ if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 3)) TRACE2(imp_sth, " +dbd_st_execute_for_fetch(%p) count=%"IVdf"\n", sth, count); if (SQL_NULL_HDBC == imp_dbh->hdbc) { DBIh_SET_ERR_CHAR(sth, (imp_xxh_t*)imp_sth, Nullch, 1, "Database handle has been disconnected", Nullch, Nullch); return -2; } /* Check that the `tuples' parameter is an array ref */ if(!SvROK(tuples) || SvTYPE(SvRV(tuples)) != SVt_PVAV) { croak("odbc_st_execute_for_fetch(): Not an array reference."); } tuples_av = (AV*)SvRV(tuples); /* Check the `tuples_status' parameter. */ if(SvTRUE(tuple_status)) { if(!SvROK(tuple_status) || SvTYPE(SvRV(tuple_status)) != SVt_PVAV) { croak("odbc_st_execute_for_fetch(): tuples_status not an array reference."); } tuples_status_av = (AV*)SvRV(tuple_status); av_fill(tuples_status_av, count - 1); } else { tuples_status_av = NULL; } /* Nothing to do if no tuples. */ if (count <= 0) return 0; /* * if the handle is active, we need to finish it here. * Note that dbd_st_finish already checks to see if it's active. */ dbd_st_finish(sth, imp_sth);; rc = SQLFreeStmt(imp_sth->hstmt, SQL_RESET_PARAMS); if (!SQL_SUCCEEDED(rc)) { dbd_error(sth, rc, "odbc_st_execute_for_fetch/SQL_RESET_PARAMS"); return -2; } if (!imp_sth->all_params_hv) { croak("No parameter hash"); } /* set bind type, parameter set size and parameters processed */ rc = SQLSetStmtAttr(imp_sth->hstmt, SQL_ATTR_PARAM_BIND_TYPE, (SQLPOINTER)SQL_PARAM_BIND_BY_COLUMN, 0); if (!SQL_SUCCEEDED(rc)) { dbd_error(sth, rc, "odbc_st_execute_for_fetch/SQL_ATTR_PARAM_BIND_TYPE"); return -2; } n_params = (int)HvKEYS(imp_sth->all_params_hv); if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 3)) TRACE1(imp_sth, " num params=%d\n", n_params); /* if count increased free up last param status array */ if (count > imp_sth->allocated_batch_size) { remalloc_svs = 1; /* remalloc strlen_or_ind_array */ if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 3)) TRACE0(imp_sth, " remallocing sv arrays\n"); if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 3)) TRACE3(imp_sth, " count increased from %d to %"IVdf" psa=%p\n", imp_sth->allocated_batch_size, count, imp_sth->param_status_array); if (imp_sth->param_status_array) { if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 3)) TRACE0(imp_sth, " freeing previous parameter status array\n"); Safefree(imp_sth->param_status_array); imp_sth->param_status_array = NULL; } } /* * Set up the parameter status array */ if (!imp_sth->param_status_array) { unsigned int i; if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 3)) TRACE1(imp_sth, " allocating parameter status array for %"IVdf" rows\n", count); imp_sth->param_status_array = (SQLUSMALLINT *)safemalloc(count * sizeof(SQLUSMALLINT)); /* fill the parameter status array with invalid values so we can see if the driver writes them - some don't in some circumstances */ for (i = 0; i < count; i++) { 
imp_sth->param_status_array[i] = 9999; } imp_sth->allocated_batch_size = count; } /* Calc max size of each parameter */ maxlen = (unsigned long *)safemalloc(n_params * sizeof(unsigned long)); for (p = 0; p < n_params; p++) { maxlen[p] = 0; } for (row = 0; row < count; row++) { SV **sv_p; SV *sv; AV *av; if (SvTRUE(tuple_status)){ av_store(tuples_status_av, row, newSViv((IV)-1)); /* don't know count */ } sv_p = av_fetch(tuples_av, row, 0); if(sv_p == NULL) { Safefree(maxlen); croak("Cannot fetch tuple %d", row); } sv = *sv_p; if(!SvROK(sv) || SvTYPE(SvRV(sv)) != SVt_PVAV) { Safefree(maxlen); croak("Not an array ref in element %d", row); } av = (AV*)SvRV(sv); for (p = 1; p <= n_params; p++) { STRLEN sv_len; sv_p = av_fetch(av, p-1, 0); if(sv_p == NULL) { Safefree(maxlen); croak("Cannot fetch value for param %d in row %d", p, row); } sv = *sv_p; /*check to see if value sv is a null (undef) if it is upgrade it*/ if (!SvOK(sv)) { (void)SvUPGRADE(sv, SVt_PV); } else { (void)SvPV(sv, sv_len); if ((sv_len + 1) > maxlen[p-1]) { maxlen[p-1] = sv_len + 1; } } } } for (p = 1; p <= n_params; p++) { char name[32]; SV **phs_svp; phs_t *phs; sprintf(name, "%u", p); if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) TRACE2(imp_sth, " Max size of p%d = %lu\n", p-1, maxlen[p-1]); phs_svp = hv_fetch(imp_sth->all_params_hv, name, strlen(name), 0); if (phs_svp == NULL) { /* TO_DO */ abort(); } phs = (phs_t*)(void*)SvPVX(*phs_svp); if (maxlen[p-1] > 0) { if (phs->param_array_buf) Safefree(phs->param_array_buf); if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) TRACE3(imp_sth, " allocating %ld * rows=%"IVdf" for p%u\n", maxlen[p-1], count, p); #if defined(WITH_UNICODE) phs->param_array_buf = (char *)safemalloc(maxlen[p-1] * count * sizeof(SQLWCHAR)); #else phs->param_array_buf = (char *)safemalloc(maxlen[p-1] * count); #endif } else { phs->param_array_buf = NULL; } if (remalloc_svs) { if (phs->strlen_or_ind_array) { if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) TRACE1(imp_sth, " freeing ind array for p%d\n", p); Safefree(phs->strlen_or_ind_array); phs->strlen_or_ind_array = NULL; } } if (!phs->strlen_or_ind_array) { if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) TRACE2(imp_sth, " allocating %"IVdf" for p%u ind array\n", count * sizeof(SQLULEN), p); phs->strlen_or_ind_array = (SQLLEN *)safemalloc(count * 2 * sizeof(SQLLEN)); } get_param_type(sth, imp_sth, imp_dbh, phs); if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) PerlIO_printf(DBIc_LOGPIO(imp_sth), " PARAM name=%s sv=%p idx=%u vt=%d (%s) svt=%d (%s) buf=%p ps=%lu dpc=%d dps=%d ml=%"IVdf" dst=%d\n", phs->name, phs->sv, phs->idx, phs->value_type, S_SqlCTypeToString(phs->value_type), phs->sql_type, S_SqlTypeToString(phs->sql_type), phs->param_array_buf, phs->param_size, phs->describe_param_called, phs->describe_param_status, phs->maxlen, phs->described_sql_type); #if defined(WITH_UNICODE) rc = SQLBindParameter(imp_sth->hstmt, p, SQL_PARAM_INPUT, SQL_C_WCHAR, phs->sql_type, maxlen[p-1], 0, phs->param_array_buf, maxlen[p-1] * sizeof(SQLWCHAR), phs->strlen_or_ind_array); #else rc = SQLBindParameter(imp_sth->hstmt, p, SQL_PARAM_INPUT, SQL_C_CHAR, phs->sql_type, maxlen[p-1], 0, phs->param_array_buf, maxlen[p-1], phs->strlen_or_ind_array); #endif if (!SQL_SUCCEEDED(rc)) { Safefree(maxlen); dbd_error(sth, rc, "odbc_st_execute_for_fetch/SQLBindParameter"); return -2; } } for (row = 0; row < count; row++) { SV **sv_p; SV *sv; AV *av; sv_p = av_fetch(tuples_av, row, 0); sv = *sv_p; av = (AV*)SvRV(sv); for (p = 1; p <= n_params; p++) { char name[32]; SV **phs_svp; phs_t *phs; STRLEN sv_len; char 
*sv_val; sprintf(name, "%u", p); phs_svp = hv_fetch(imp_sth->all_params_hv, name, strlen(name), 0); if (phs_svp == NULL) { abort(); } phs = (phs_t*)(void*)SvPVX(*phs_svp); sv_p = av_fetch(av, phs->idx - 1, 0); if(sv_p == NULL) { Safefree(maxlen); croak("Cannot fetch value for param %d in row %d", p, row); } sv = *sv_p; /*check to see if value sv is a null (undef) if it is upgrade it*/ if (!SvOK(sv)) { (void)SvUPGRADE(sv, SVt_PV); phs->strlen_or_ind_array[row] = SQL_NULL_DATA; } else { #if defined(WITH_UNICODE) SV_toWCHAR(aTHX_ sv); sv_val = SvPV(sv, sv_len); memcpy((char *)(phs->param_array_buf + (row * maxlen[p-1] * sizeof(SQLWCHAR))), sv_val, sv_len); phs->strlen_or_ind_array[row] = sv_len; #else sv_val = SvPV(sv, sv_len); phs->strlen_or_ind_array[row] = SQL_NTS /*strlen(sv_val)*/; strcpy((char *)(phs->param_array_buf + (row * maxlen[p-1])), sv_val); #endif } if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) PerlIO_printf(DBIc_LOGPIO(imp_sth), " row=%d p%d ind=%ld /%s/\n", row, p, phs->strlen_or_ind_array[row], phs->param_array_buf + (row * maxlen[p-1]) ); if(SvROK(sv)) { Safefree(maxlen); croak("Can't bind a reference (%s) for param %d, row %d", neatsvpv(sv,0), p, row); } } } if (maxlen) Safefree(maxlen); maxlen = NULL; /* We do this as late as possible as we don't want to leave * paramset size set in the statement in case the Perl code does * some other parameter binding without execute_array. */ rc = SQLSetStmtAttr(imp_sth->hstmt, SQL_ATTR_PARAMSET_SIZE, (SQLPOINTER)count, 0); if (!SQL_SUCCEEDED(rc)) { dbd_error(sth, rc, "odbc_st_execute_for_fetch/SQL_ATTR_PARAMSET_SIZE"); return -2; } rc = SQLSetStmtAttr(imp_sth->hstmt, SQL_ATTR_PARAMS_PROCESSED_PTR, (SQLPOINTER)&imp_sth->params_processed, 0); if (!SQL_SUCCEEDED(rc)) { dbd_error(sth, rc, "odbc_st_execute_for_fetch/SQL_ATTR_PARAMS_PROCESSED_PTR"); return -2; } rc = SQLSetStmtAttr(imp_sth->hstmt, SQL_ATTR_PARAM_STATUS_PTR, (SQLPOINTER)imp_sth->param_status_array, 0); if (!SQL_SUCCEEDED(rc)) { dbd_error(sth, rc, "odbc_st_execute_for_fetch/SQL_ATTR_PARAM_STATUS_PTR"); return -2; } rc = SQLExecute(imp_sth->hstmt); /* SQLExecute may fail with SQL_ERROR in which case we have a serious * problem but usually it fails for a row of parameters with * SQL_SUCCESS_WITH_INFO - in the latter case the parameter status * array will indicate and error for this row and we'll pick it up later */ if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) TRACE1(imp_sth, " SQLExecute=%d\n", rc); if (!SQL_SUCCEEDED(rc)) { dbd_error(sth, rc, "odbc_st_execute_for_fetch/SQLExecute"); /* reset paramset size and params processed */ SQLSetStmtAttr(imp_sth->hstmt, SQL_ATTR_PARAMS_PROCESSED_PTR, (SQLPOINTER)NULL, 0); SQLSetStmtAttr(imp_sth->hstmt, SQL_ATTR_PARAMSET_SIZE, (SQLPOINTER)1, 0); SQLSetStmtAttr(imp_sth->hstmt, SQL_ATTR_PARAM_STATUS_PTR, (SQLPOINTER)NULL, 0); return -2; } else if (rc == SQL_SUCCESS_WITH_INFO) { dbd_error(sth, rc, "odbc_st_execute_for_fetch/SQLExecute"); } if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 4)) TRACE1(imp_sth, " params processed = %lu\n", imp_sth->params_processed); { unsigned int row; char sqlstate[SQL_SQLSTATE_SIZE+1]; SQLINTEGER native; char msg[256]; /* NOTE, DBI says we fill tuple_status for each row with what execute returns - i.e., row count. 
It makes more sense for ODBC to fill it with the values in SQL_ATTR_PARAM_STATUS_PTR which are: SQL_PARAM_SUCCESS, SQL_PARAM_SUCCESS_WITH_INFO, SQL_PARAM_ERROR, SQL_PARAM_UNUSED, SQL_PARAM_IGNORE - but we do what DBI says */ /* Don't step beyond Params Processed as if the driver says it has processed N rows and we step past N, the values could be rubbish - the driver probably hasn't even written them. In particular, if we look at param status array after params processed the values will probably be junk (randon values in the malloced data) and it will lead us to think they are not successful - assuming they are not 0 = SQL_PARAM_SUCCESS */ for (row = 0; row < count; row++) { if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 3)) TRACE2(imp_sth, " row %d, parameter status = %u\n", row, imp_sth->param_status_array[row]); if (imp_sth->params_processed <= row) { /* parameter was not processed so no point in looking at parameter status array */ av_store(tuples_status_av, row, newSViv((IV) -1)); } else if (imp_sth->param_status_array[row] == 9999) { SV *err_svs[3]; if (SvTRUE(tuple_status)){ err_svs[0] = newSViv((IV)1); err_svs[1] = newSVpv("warning: parameter status was not returned", 0); err_svs[2] = newSVpv("HY000", 0); av_store(tuples_status_av, row, newRV_noinc((SV *)(av_make(3, err_svs)))); } DBIh_SET_ERR_SV(sth, (imp_xxh_t*)imp_sth, newSVpv("",0), err_svs[1], err_svs[2], &PL_sv_undef); } else if ((imp_sth->param_status_array[row] == SQL_PARAM_SUCCESS) || (imp_sth->param_status_array[row] == SQL_PARAM_UNUSED) || (imp_sth->param_status_array[row] == SQL_PARAM_DIAG_UNAVAILABLE)) { /* We'll never get SQL_PARAM_IGNORE as we never set a row operations array */ /* Some drivers which do SQL_PARC_NO_BATCH will set SQL_PARAM_DIAG_UNAVAILABLE for every row as they cannot tell us on a per row basis. Treat these as success as they are since the call the SQLExecute above would have failed otherwise. */ /* DBI requires us to set each tuple_status to the rows * affected but we don't know it on a per row basis so. In any case in * order to count which tuples were executed and which were not we need * to return SQL_PARAM_SUCCES/SQL_PARAM_UNUSED - obviously any rows in * error were executed. The code above needs to translate ODBC statuses.*/ if (SvTRUE(tuple_status)){ av_store(tuples_status_av, row, newSViv((IV) imp_sth->param_status_array[row])); /*av_store(tuples_status_av, row, newSViv((IV)-1));*/ } } else { /* SQL_PARAM_ERROR or SQL_PARAM_SUCCESS_WITH_INFO */ SV *err_svs[3]; int found; /* Some drivers won't support SQL_DIAG_ROW_NUMBER so we cannot be sure which diag relates to which row. 
'found' tells us if we found a diag for row 'row+1' but in any case what can we do if we don't - so we just report whatever diag we have */ found = get_row_diag(row+1, imp_sth, sqlstate, &native, msg, sizeof(msg)); if (SvTRUE(tuple_status)){ err_svs[0] = newSViv((IV)native); err_svs[1] = newSVpv(msg, 0); err_svs[2] = newSVpv(sqlstate, 0); av_store(tuples_status_av, row, newRV_noinc((SV *)(av_make(3, err_svs)))); } DBIh_SET_ERR_CHAR(sth, (imp_xxh_t*)imp_sth, Nullch, 1, msg, sqlstate, Nullch); err_seen++; } } } /* reset paramset size and params processed */ SQLSetStmtAttr(imp_sth->hstmt, SQL_ATTR_PARAMS_PROCESSED_PTR, (SQLPOINTER)NULL, 0); SQLSetStmtAttr(imp_sth->hstmt, SQL_ATTR_PARAMSET_SIZE, (SQLPOINTER)1, 0); SQLSetStmtAttr(imp_sth->hstmt, SQL_ATTR_PARAM_STATUS_PTR, (SQLPOINTER)NULL, 0); rc = SQLRowCount(imp_sth->hstmt, &imp_sth->RowCount); if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 3)) TRACE2(imp_sth, " SQLRowCount=%d (rows=%"IVdf")\n", rc, (IV)imp_sth->RowCount); if (rc != SQL_SUCCESS) { /* TO_DO free strlen_or_ind_array */ /* on the other hand since batch_size is always constant we could leave this for other execute_for_fetches */ /*safefree(phs->strlen_or_ind_array);*/ /*phs->strlen_or_ind_array = NULL;*/ dbd_error(sth, rc, "odbc_st_execute_for_fetch/SQLRowCount"); return -2; } DBIc_ROW_COUNT(imp_sth) = imp_sth->RowCount; /* why does this break stuff imp_sth->param_status_array = NULL; */ if (err_seen) { return -2; } else { return imp_sth->RowCount; } } /* * get_row_diag * * When we are doing execute_for_fetch/execute_array we bind rows of * parameters. When one of more fail we have a list of diagnostics and * the driver manager may reorder them in severity order. Also, each row * in error could generate multiple error diagnostics e.g., * attempting to insert too much data generates: * diag 1 22001, 2290136, [Microsoft][ODBC SQL Server Driver][SQL Server]String or binary data would be truncated. * diag 2 01000, 2290136, [Microsoft][ODBC SQL Server Driver][SQL Server]The statem ent has been terminated. * * Fortunately for us, each diagnostic contains the row number the error relates * to (in working drivers). This function is passed the row we have detected * in error and attempts to find the relevant error - it always returns the * first error (if there is more than one). * * We return 1 if any error for the supplied recno is found else 0 * Also, if SQLGetDiagRec fails we fill state, native, msg with a values saying so * so you can rely on the fact state, native and msg are at least set. 
*/ static int get_row_diag(SQLSMALLINT recno, imp_sth_t *imp_sth, char *state, SQLINTEGER *native, char *msg, size_t max_msg) { SQLSMALLINT i = 1; SQLRETURN rc; SQLSMALLINT msg_len; if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 3)) TRACE1(imp_sth, " +get_row_diag for row %d\n", recno); /*printf("get_row_diag %d\n", recno);*/ /* SQLRETURN return_code; rc = SQLGetDiagField(SQL_HANDLE_STMT, imp_sth->hstmt, 0, SQL_DIAG_RETURNCODE, &return_code, 0, NULL); printf("return code = %d\n", return_code); */ while(SQL_SUCCEEDED(rc = SQLGetDiagRec(SQL_HANDLE_STMT, imp_sth->hstmt, i, state, native, msg, max_msg, &msg_len))) { /*SQLINTEGER col;*/ SQLLEN row; if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 3)) PerlIO_printf(DBIc_LOGPIO(imp_sth), " diag %d %s, %ld, %s\n", i, state, (long)*native, msg); /*printf("diag %d %s, %ld, %s\n", i, state, native, msg);*/ if (max_msg < 100) { croak("Come on, code needs some space to put the diag message"); } rc = SQLGetDiagField(SQL_HANDLE_STMT, imp_sth->hstmt, i, SQL_DIAG_ROW_NUMBER, &row, 0, NULL); if (SQL_SUCCEEDED(rc)) { /* Could return SQL_ROW_NUMBER_UNKNOWN or SQL_NO_ROW_NUMBER */ if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 3)) PerlIO_printf(DBIc_LOGPIO(imp_sth), " diag row=%ld\n", row); /* few drivers support SQL_DIAG_COLUMN_NUMBER - most return -1 unfortunately rc = SQLGetDiagField(SQL_HANDLE_STMT, imp_sth->hstmt, i, SQL_DIAG_COLUMN_NUMBER, &col, 0, NULL); printf(" row %d col %ld\n", row, col); */ if (row == (SQLLEN)recno) return 1; } else if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 3)) { TRACE0(imp_sth, "SQLGetDiagField for SQL_DIAG_ROW_NUMBER failed"); } i++; /* next record */ }; /* will be SQL_NO_DATA if we reach the end of diags without finding anything */ /* TO_DO some drivers are not going to support SQL_DIAG_COLUMN_NUMBER so we should do better than below - maybe show the first/last error */ strcpy(state, "HY000"); *native = 1; strcpy(msg, "failed to retrieve diags"); return 0; } /* * taf_callback_wrapper is the function we pass to Oracle to be called * when a connection fails. We asked the ODBC driver to pass our dbh * handle in and it also gives us the type and event. We just pass all * these args off to the registered Perl subroutine and return to * the Oracle driver whatever that Perl sub returns to us. In this way * the user's Perl dictates what happens in the failover process and not * us. 
*/ static int taf_callback_wrapper ( void *handle, int type, int event) { dTHX; int return_count; int ret; SV* dbh = (SV *)handle; D_imp_dbh(dbh); dSP; PUSHMARK(SP); XPUSHs(handle); XPUSHs(sv_2mortal(newSViv(event))); XPUSHs(sv_2mortal(newSViv(type))); PUTBACK; return_count = call_sv(imp_dbh->odbc_taf_callback, G_SCALAR); SPAGAIN; if (return_count != 1) croak("Expected one scalar back from taf handler"); ret = POPi; PUTBACK; return ret; } static void check_for_unicode_param( imp_sth_t *imp_sth, phs_t *phs) { if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 5)) { TRACE2(imp_sth, "check_for_unicode_param - sql_type=%s, described=%s\n", S_SqlTypeToString(phs->sql_type), S_SqlTypeToString(phs->described_sql_type)); } /* If we didn't called SQLDescribeParam successfully, we've defaulted/guessed so just return as sql_type will already be set */ if (!phs->described_sql_type) return; if (SvUTF8(phs->sv)) { if (phs->described_sql_type == SQL_CHAR) { phs->sql_type = SQL_WCHAR; } else if (phs->described_sql_type == SQL_VARCHAR) { phs->sql_type = SQL_WVARCHAR; } else if (phs->described_sql_type == SQL_LONGVARCHAR) { phs->sql_type = SQL_WLONGVARCHAR; } else { phs->sql_type = phs->described_sql_type; } if (DBIc_TRACE(imp_sth, DBD_TRACING, 0, 5) && (phs->sql_type != phs->described_sql_type)) TRACE1(imp_sth, " SvUTF8 parameter - changing to %s type\n", S_SqlTypeToString(phs->sql_type)); } else { if (phs->described_sql_type == SQL_NUMERIC || phs->described_sql_type == SQL_DECIMAL || phs->described_sql_type == SQL_FLOAT || phs->described_sql_type == SQL_REAL || phs->described_sql_type == SQL_DOUBLE) { phs->sql_type = SQL_VARCHAR; } else { phs->sql_type = phs->described_sql_type; } } } AV* dbd_data_sources(SV *drh ) { dTHX; SQLUSMALLINT fDirection = SQL_FETCH_FIRST; RETCODE rc; SQLCHAR dsn[SQL_MAX_DSN_LENGTH+1+9 /* strlen("DBI:ODBC:") */]; SQLSMALLINT dsn_length; SQLCHAR description[256]; SQLSMALLINT description_length; AV *ds = newAV(); D_imp_drh(drh); if (!imp_drh->connects) { rc = SQLAllocEnv(&imp_drh->henv); if (!SQL_ok(rc)) { imp_drh->henv = SQL_NULL_HENV; dbd_error(drh, rc, "data_sources/SQLAllocEnv"); return NULL; } } strcpy(dsn, "dbi:ODBC:"); while (1) { description[0] = '\0'; rc = SQLDataSources(imp_drh->henv, fDirection, dsn+9, /* strlen("dbi:ODBC:") */ SQL_MAX_DSN_LENGTH, &dsn_length, description, sizeof(description), &description_length); if (!SQL_ok(rc)) { if (rc != SQL_NO_DATA_FOUND) { /* * Temporarily increment imp_drh->connects, so * that dbd_error uses our henv. 
*/ imp_drh->connects++; dbd_error(drh, rc, "data_sources/SQLDataSources"); imp_drh->connects--; } break; } av_push( ds, newSVpv(dsn, dsn_length + 9 ) ); fDirection = SQL_FETCH_NEXT; } if (!imp_drh->connects) { SQLFreeEnv(imp_drh->henv); imp_drh->henv = SQL_NULL_HENV; } return ds; } /* end */ DBD-ODBC-1.61/fixup_c.h0000644000175000017500000000037312250310263013710 0ustar martinmartin /* fix up constants and other fundamentals that some driver managers */ /* don't define (basically iODBC) */ #ifndef SQL_API_ALL_FUNCTIONS #define SQL_API_ALL_FUNCTIONS 0 #endif #ifndef SQL_SQLSTATE_SIZE #define SQL_SQLSTATE_SIZE 5 #endif DBD-ODBC-1.61/examples/0000755000175000017500000000000013614770375013740 5ustar martinmartinDBD-ODBC-1.61/examples/odbc_diag.pl0000644000175000017500000000466012250310263016153 0ustar martinmartin# $Id$ # # Demonstrate the experimental odbc_getdiagrec and odbc_getdiagfield # use strict; use warnings; use DBI; use Data::Dumper; use Test::More; use DBD::ODBC qw(:diags); # header fields: #define SQL_DIAG_CURSOR_ROW_COUNT (-1249) #define SQL_DIAG_DYNAMIC_FUNCTION 7 #define SQL_DIAG_DYNAMIC_FUNCTION_CODE 12 #define SQL_DIAG_NUMBER 2 #define SQL_DIAG_RETURNCODE 1 #define SQL_DIAG_ROW_COUNT 3 my @hdr_fields = (SQL_DIAG_CURSOR_ROW_COUNT, SQL_DIAG_DYNAMIC_FUNCTION, SQL_DIAG_DYNAMIC_FUNCTION_CODE, SQL_DIAG_NUMBER, SQL_DIAG_RETURNCODE, SQL_DIAG_ROW_COUNT); # record fields: #define SQL_DIAG_CLASS_ORIGIN 8 #define SQL_DIAG_COLUMN_NUMBER (-1247) #define SQL_DIAG_CONNECTION_NAME 10 #define SQL_DIAG_MESSAGE_TEXT 6 #define SQL_DIAG_NATIVE 5 #define SQL_DIAG_ROW_NUMBER (-1248) #define SQL_DIAG_SERVER_NAME 11 #define SQL_DIAG_SQLSTATE 4 #define SQL_DIAG_SUBCLASS_ORIGIN 9 my @record_fields = (SQL_DIAG_CLASS_ORIGIN, SQL_DIAG_COLUMN_NUMBER, SQL_DIAG_CONNECTION_NAME, SQL_DIAG_MESSAGE_TEXT, SQL_DIAG_NATIVE, SQL_DIAG_ROW_NUMBER, SQL_DIAG_SERVER_NAME, SQL_DIAG_SQLSTATE, SQL_DIAG_SUBCLASS_ORIGIN); sub get_fields { my ($h, $record) = @_; foreach (@hdr_fields, @record_fields) { eval { my $x = $h->odbc_getdiagfield($record, $_); diag("$_ = " . ($x ? $x : 'undef') . 
"\n"); }; if ($@) { diag("diag field $_ errored\n"); } } } my $h = DBI->connect("dbi:ODBC:DSN=SQLite",undef,undef, {RaiseError => 1, PrintError => 0}); my ($s, @diags); @diags = $h->odbc_getdiagrec(1); is(scalar(@diags), 0, 'no dbh diags after successful connect') or explain(@diags); my $ok = eval { $h->get_info(9999); 1; }; ok(!$ok, "SQLGetInfo fails"); @diags = $h->odbc_getdiagrec(1); is(scalar(@diags), 3, ' and 3 diag fields returned'); diag(Data::Dumper->Dump([\@diags], [qw(diags)])); get_fields($h, 1); @diags = $h->odbc_getdiagrec(2); is(scalar(@diags), 0, ' and no second record diags'); $ok = eval { # some drivers fail on the prepare - some don't fail until execute $s = $h->prepare(q/select * from table_does_not_exist/); $s->execute; 1; }; ok(!$ok, "select on non-existant table fails"); my $hd = $s || $h; @diags = $hd->odbc_getdiagrec(1); is(scalar(@diags), 3, ' and 3 diag fields returned'); diag(Data::Dumper->Dump([\@diags], [qw(diags)])); get_fields($hd, 1); done_testing(); DBD-ODBC-1.61/examples/testPrc.sql0000644000175000017500000000035012250310263016060 0ustar martinmartin-- $Id$ -- CREATE PROCEDURE dbo.testPrc @parameter1 int = 22 AS /* SET NOCOUNT ON */ select 1 as some_data select isnull(@parameter1, 0) as parameter1, 3 as some_more_data -- print 'kaboom' RETURN(@parameter1 + 1) DBD-ODBC-1.61/examples/DbiTest2.pl0000644000175000017500000000534312250310263015677 0ustar martinmartinuse strict; # $Id$ use warnings; use DBI; use Data::Dumper; $Data::Dumper::Maxdepth = 4; use constant LONG_READ_LEN => 8000; my %options = ( DbSrcServer => '(local)', DbSrcDatabase => 'Helpdesk2', DbSrcLoginName => 'sa', DbSrcPassword => '', ); my @dbhPool; ########################################## ### Functions ########################################## sub newDbh() {my $dbh; if(defined($options{DbSrcServer}) && defined($options{DbSrcLoginName}) && defined($options{DbSrcDatabase})) { my $dsn = "DRIVER={SQL Server};SERVER=$options{DbSrcServer};DATABASE=$options{DbSrcDatabase};NETWORK=dbmssocn;UID=$options{DbSrcLoginName};PWD=$options{DbSrcPassword}"; # print "DSN: $dsn\n\n"; $dbh = DBI->connect("DBI:ODBC:$dsn") || die "DBI connect failed: $DBI::errstr\n"; $dbh->{AutoCommit} = 0; # enable transactions, if possible $dbh->{RaiseError} = 0; $dbh->{PrintError} = 1; # use RaiseError instead $dbh->{ShowErrorStatement} = 1; push @dbhPool, $dbh; return($dbh); } } sub test($) { my ($outputTempate) = @_; my $dbh = newDbh(); my $sth = $dbh->prepare('select ID from (select 1 as ID union select 2 as ID union select 3 as ID) tmp order by ID'); $sth->execute(); # print '$sth->{Active}: ', $sth->{Active}, "\n"; do { for(my $rowRef = undef; $rowRef = $sth->fetchrow_hashref('NAME'); ) { #print '%$rowRef ', Dumper(\%$rowRef), "\n"; innerTest($outputTempate); } } while($sth->{odbc_more_results}); } my $innerTestSth; sub innerTest($) { my ($outputTempate) = @_; my %outputData; my $queryInputParameter1 = 2222; my $queryOutputParameter = $outputTempate; my $sth; if(!defined $innerTestSth) { my $dbh = newDbh(); $innerTestSth = $dbh->prepare('{? = call testPrc(?) 
}'); } $sth = $innerTestSth; $sth->bind_param_inout(1, \$queryOutputParameter, 30, { TYPE => DBI::SQL_INTEGER }); $sth->bind_param(2, $queryInputParameter1, { TYPE => DBI::SQL_INTEGER }); # $sth->trace(1);#, 'DbiTest.txt'); $sth->execute(); print '$sth->{Active}: ', $sth->{Active}, "\n"; do { for(my $rowRef = undef; $rowRef = $sth->fetchrow_hashref('NAME'); ) { print '%$rowRef2 ', Dumper(\%$rowRef), "\n"; } } while($sth->{odbc_more_results}); print '$queryOutputParameter: \'', $queryOutputParameter, '\' expected: (', $queryInputParameter1 + 1, ")\n\n"; } ########################################## ### Test ########################################## #test(0); #test(10); #test(100); #test(' '); test(10); ########################################## ### Cleanup... ########################################## foreach my $dbh (@dbhPool) { $dbh->rollback(); $dbh->disconnect(); } DBD-ODBC-1.61/examples/testundef2.pl0000755000175000017500000000165512250310263016347 0ustar martinmartin#!perl -w # $Id$ use strict; use DBI qw(:sql_types); my $dbh=DBI->connect() or die "Can't connect"; eval {$dbh->do("DROP TABLE table1");}; eval {$dbh->do("CREATE TABLE table1 (v varchar(4000), d datetime)");}; unlink("dbitrace.log") if (-e "dbitrace.log") ; $dbh->trace(8, "dbitrace.log"); my $sth = $dbh->prepare ("INSERT INTO table1 (d, v) VALUES (?, ?)"); $sth->bind_param (1, undef, SQL_TYPE_TIMESTAMP); $sth->bind_param (2, undef, SQL_LONGVARCHAR); $sth->execute(); $sth->bind_param (1, "2002-07-12 17:07:37.350", SQL_TYPE_TIMESTAMP); $sth->bind_param (2, "real data", SQL_LONGVARCHAR); $sth->execute(); $sth->bind_param (1, undef, SQL_TYPE_TIMESTAMP); $sth->bind_param (2, undef, SQL_LONGVARCHAR); $sth->execute(); $sth = $dbh->prepare("select d, v from table1"); $sth->execute; my @row; while (@row = $sth->fetchrow_array) { foreach (@row) { $_ = "" if (!defined($_)); } print join(", ", @row), "\n"; } $dbh->disconnect; DBD-ODBC-1.61/examples/identity.pl0000644000175000017500000000113712250310263016105 0ustar martinmartin#!perl -w # $Id$ use strict; use DBI; my $dbh = DBI->connect(); # create a temp table with an identity property on a column: my $sql = qq{CREATE TABLE #TEMP1 (MyCol INT NOT NULL IDENTITY)}; $dbh->do($sql); # Set the identity insert property for this table on # this should allow me to explicitly give a value to be inserted into the # identity column: $sql = qq{SET IDENTITY_INSERT #TEMP1 ON}; $dbh->do($sql); # Added by JLU # now try to insert an explicit value into this identity column: $sql = qq{INSERT INTO #TEMP1 (MyCol) VALUES (1)}; $dbh->do($sql); $dbh->disconnect; DBD-ODBC-1.61/examples/big_result.pl0000644000175000017500000000177312250310263016421 0ustar martinmartin# $Id$ # create a table with lots of big rows and see how long it takes to # get it back # run once with any command line argument (to create and populate the table) # then later without the argument use DBI; use strict; use warnings; use Benchmark::Timer; use Data::Dumper; my $t = Benchmark::Timer->new; $t->start('main'); my $h = DBI->connect; if ($ARGV[0]) { print "Recreating table\n"; eval {$h->do(q/drop table mje/);}; $h->do(q/create table mje (a varchar(50), b varchar(50), c varchar(50), d varchar(50))/); $h->begin_work; my $s = $h->prepare(q/insert into mje values(?,?,?,?)/); my $a = 'a' x 50; my $b = 'b' x 50; my $c = 'c' x 50; my $d = 'd' x 50; foreach (1..50000) { $s->execute($a, $b, $c, $d); } $h->commit; } $t->stop('main'); $t->start('fetch'); my $r = $h->selectall_arrayref(q/select * from mje/); $t->stop('fetch'); #$t->start('dump'); 
#print Dumper($r); #$t->stop('dump'); print "Rows fetched:", scalar(@$r), "\n"; print $t->reports; DBD-ODBC-1.61/examples/testconnspeed.pl0000644000175000017500000000162412250310263017133 0ustar martinmartin#!perl -w use strict; use DBI; use Benchmark qw(timethese cmpthese timeit countit timestr); my %conns = ( DBD_ORACLE => sub { doconnect("dbi:Oracle:URLWINLT"); }, DBD_ODBC_ORACLE => sub { doconnect("dbi:ODBC:PERL_TEST_ORACLE"); }, DBD_ODBC_MSORACLE => sub { doconnect("dbi:ODBC:PERL_TEST_MSORACLE"); }, DBD_ODBC_SQLSERVER => sub { doconnect("dbi:ODBC:PERL_TEST_SQLSERVER"); }, DBD_ODBC_DB2 => sub { doconnect("dbi:ODBC:PERL_TEST_DB2"); }, DBD_ODBC_ACCESS => sub { doconnect("dbi:ODBC:PERL_TEST_ACCESS"); }, ); sub doconnect ($) { my $connstr = shift; my $dbh = DBI->connect($connstr, $ENV{DBI_USER}, $ENV{DBI_PASS}, { RaiseError => 1, PrintError => 1 } ) || die "Can't connect with $connstr: $DBI::errstr"; } timethese 100, \%conns; cmpthese 100, \%conns; DBD-ODBC-1.61/examples/longbin.pl0000644000175000017500000000503512250310263015705 0ustar martinmartin#/usr/bin/perl -w # $Id$ use strict; use DBI qw (:sql_types); use Digest::MD5 qw(md5 md5_hex); my $dbh = DBI->connect(); $dbh->{RaiseError} = 1; # raise the error $dbh->{PrintError} = 0; # but don't print it. $dbh->{odbc_default_bind_type} = 0; eval { # if it's not already created, the eval will silently ignore this $dbh->do("drop table longtest;"); }; # probably should use get_info to get the type for long here... my $dbname = $dbh->get_info(17); # DBI::SQL_DBMS_NAME my $longbinary_type = get_first_type_info($dbh, SQL_LONGVARBINARY); my $integer_type = get_first_type_info($dbh, SQL_INTEGER); print "$dbname, ($integer_type, $longbinary_type)\n"; $dbh->do("Create table longtest (id $integer_type, picture $longbinary_type)"); my $sth = $dbh->prepare("insert into longtest (id, picture) values (?, ?)"); my $id = 0; my $file; my @md5sums = (); foreach $file (@ARGV) { my $blob; eval { print "Reading: $file\n"; $blob = readblobfile($file); }; if (!$@) { $md5sums[$id] = md5_hex($blob); $sth->bind_param(1, $id); #DBI::SQL_INTEGER); # with access, you must bind to SQL_LONGVARBINARY! Otherwise, it doesn't work. # oracle and SQL Server handle the types correctly... if ($dbname =~ /Access/i) { $sth->bind_param(2, $blob, DBI::SQL_LONGVARBINARY); } else { $sth->bind_param(2, $blob); } $sth->execute; $id++; } else { printf("Couldn't read file: $@\n"); } } # now check the data, just out of paranoia... $dbh->{LongReadLen} = 2000000; $dbh->{LongTruncOk} = 0; my $sthr = $dbh->prepare("select id, picture from longtest order by id"); $sthr->execute; my @row; while (@row = $sthr->fetchrow_array) { my $digest = md5_hex($row[1]); if ($digest ne $md5sums[$row[0]]) { print "$row[0]: Digests don't match $digest, $md5sums[$row[0]]!\n"; } else { print "Good read!\n"; } } $dbh->disconnect(); sub readblobfile($) { my $filename = shift; local(*FILE, $\); # automatically close file at end of scope open(FILE, "<$filename") or die "Can't open file $!\n"; binmode(FILE); ; } sub getFileMD5 ($) { my $filename = shift; open(F, $filename) or die "Can't open file name $filename\n"; binmode(F); my $md5 = new MD5; seek(F, 0, 0); # just in case? part of docs, I left in. 
$md5->reset; $md5->addfile(\*F); close(F); $md5->hexdigest; } sub get_first_type_info($$) { my $dbh = shift; my $type = shift; my @typeinfo = $dbh->type_info($type); return $typeinfo[0]->{TYPE_NAME}; } DBD-ODBC-1.61/examples/testdisc.pl0000644000175000017500000000034512250310263016076 0ustar martinmartin#!perl -w # $Id$ use strict; use DBI; my $dbh = DBI->connect() or die "connect"; $dbh->disconnect; eval { my $sth = $dbh->tables(); }; eval { my $sth2 = $dbh->prepare("select sysdate from dual"); }; DBD-ODBC-1.61/examples/testgetinfo.pl0000644000175000017500000000050012250310263016600 0ustar martinmartinuse DBI; # $Id$ use DBI::Const; $\ = "\n"; $, = ": "; my $dbh = DBI->connect or die $DBI::errstr; $dbh->{ RaiseError } = 1; $dbh->{ PrintError } = 1; for ( @ARGV ? @ARGV : sort keys %DBI::Const::GetInfo ) { my $Val = $dbh->get_info( $DBI::Const::GetInfo{$_} ); printf " %-35s%s\n", $_, $Val if defined $Val; } DBD-ODBC-1.61/examples/unicode_sql.pl0000644000175000017500000000203412250310263016556 0ustar martinmartin# $Id$ # # Small example showing how you can insert unicode inline in the SQL # # expected output: #Has unicode: 1 #$VAR1 = [ # [ # "\x{20ac}" # ] # ]; #$VAR1 = [ # [ # "\x{20ac}" # ], # [ # "\x{20ac}" # ] # ]; # use DBI; use strict; use warnings; use Data::Dumper; my $h = DBI->connect(); #$h->{odbc_default_bind_type} = 12; warn "Warning DBD::ODBC not built for unicode - this will not work as expected" if !$h->{'odbc_has_unicode'}; eval {$h->do(q/drop table martin/);}; print "Has unicode: " . $h->{odbc_has_unicode} . "\n"; $h->do(q/create table martin (a nvarchar(100))/); my $s = $h->prepare(q/insert into martin values(?)/); $s->execute("\x{20ac}"); my $r = $h->selectall_arrayref(q/select * from martin/); print Dumper($r); my $sql = 'insert into martin values(' . $h->quote("\x{20ac}") . ')'; $h->do($sql); $r = $h->selectall_arrayref(q/select * from martin/); print Dumper($r); #--with-iconv-char-enc= #--with-iconv-ucode-enc=enc DBD-ODBC-1.61/examples/testproc.pl0000755000175000017500000000155112250310263016122 0ustar martinmartin#!perl -w # $Id$ use strict; use DBI; # Connect to the database, and create a table and stored procedure: my $dbh=DBI->connect("dbi:ODBC:PERL_TEST_SQLSERVER", $ENV{DBI_USER}, $ENV{DBI_PASS}, { RaiseError => 1 }) or die "Can't connect"; eval {$dbh->do("DROP TABLE table1");}; eval {$dbh->do("CREATE TABLE table1 (i INTEGER)");}; eval {$dbh->do("DROP PROCEDURE proc1");}; my $proc1 = "CREATE PROCEDURE proc1 AS ". "BEGIN". " INSERT INTO table1 VALUES (100);". # breaks fetchrow_array " SELECT 9;". 
"END"; eval {$dbh->do ($proc1);}; # Execute it: if (-e "dbitrace.log") { unlink("dbitrace.log"); } $dbh->trace(9, "dbitrace.log"); my $sth = $dbh->prepare ("exec proc1"); $sth->execute (); do { while (my $result = $sth->fetchrow_array()) { print "result = $result\n"; } } while ($sth->{odbc_more_results}); $dbh->disconnect; DBD-ODBC-1.61/examples/DbiTest.pl0000644000175000017500000000445312250310263015616 0ustar martinmartinuse strict; # $Id$ use warnings; use DBI; use Data::Dumper; $Data::Dumper::Maxdepth = 4; use constant LONG_READ_LEN => 8000; my %options = ( DbSrcServer => '(local)', DbSrcDatabase => 'Helpdesk2', DbSrcLoginName => 'sa', DbSrcPassword => '', ); my @dbhPool; ########################################## ### Functions ########################################## sub newDbh() {my $dbh; if(defined($options{DbSrcServer}) && defined($options{DbSrcLoginName}) && defined($options{DbSrcDatabase})) { my $dsn = "DRIVER={SQL Server};SERVER=$options{DbSrcServer};DATABASE=$options{DbSrcDatabase};NETWORK=dbmssocn;UID=$options{DbSrcLoginName};PWD=$options{DbSrcPassword}"; # print "DSN: $dsn\n\n"; $dbh = DBI->connect("DBI:ODBC:$dsn") || die "DBI connect failed: $DBI::errstr\n"; $dbh->{AutoCommit} = 0; # enable transactions, if possible $dbh->{RaiseError} = 0; $dbh->{PrintError} = 1; # use RaiseError instead $dbh->{ShowErrorStatement} = 1; push @dbhPool, $dbh; return($dbh); } } sub test($;$) { my ($outputTempate, $recurse) = @_; my $dbh = newDbh(); my $queryInputParameter1 = 2222; my $queryOutputParameter = $outputTempate; my $sth = $dbh->prepare('{? = call testPrc(?) }'); $sth->bind_param_inout(1, \$queryOutputParameter, 30, { TYPE => DBI::SQL_INTEGER }); $sth->bind_param(2, $queryInputParameter1, { TYPE => DBI::SQL_INTEGER }); # $sth->trace(1);#, 'DbiTest.txt'); $sth->execute(); print '$sth->{Active}: ', $sth->{Active}, "\n"; do { for(my $rowRef; $rowRef = $sth->fetchrow_hashref('NAME'); ) { my %outputData = %$rowRef; print 'outputData ', Dumper(\%outputData), "\n"; if($recurse > 0) { test($dbh, --$recurse); } } } while($sth->{odbc_more_results}); print '$queryOutputParameter: \'', $queryOutputParameter, '\' expected: (', $queryInputParameter1 + 1, ")\n\n"; } ########################################## ### Test ########################################## test(0, 0); test(10, 0); test(100, 0); test(' ', 0); test(0, 1); #recusion ########################################## ### Cleanup... 
########################################## foreach my $dbh (@dbhPool) { $dbh->rollback(); $dbh->disconnect(); } DBD-ODBC-1.61/examples/testundef.pl0000644000175000017500000000117612250310263016260 0ustar martinmartin#perl -w # $Id$ use DBI; my $dbh = DBI->connect() or die "$DBI::errstr\n"; eval { # if it's not already created, the eval will silently ignore this $dbh->do("drop table longtest;"); }; $dbh->do("create table hashtest (id integer, value varchar2(200))"); my %foo; $foo{1} = "bless me"; DBI->trace(9,"c:/trace.txt"); my $sth = $dbh->prepare("insert into hashtest values (?, ?)"); $sth->execute((2, $foo{2})); $sth->execute((1, $foo{1})); my $sth2 = $dbh->prepare("select id, value from hashtest order by id"); $sth2->execute; my @row; while (@row = $sth2->fetch) { print join(', ', @row), "\n"; } $dbh->disconnect; DBD-ODBC-1.61/examples/lob_read.pl0000644000175000017500000000223412250310263016022 0ustar martinmartin# $Id$ # # Example of DBD::ODBC's lob_read # #use Log::Log4perl qw(:easy); #Log::Log4perl->easy_init($DEBUG); #use DBIx::Log4perl; use DBI qw(:sql_types); use strict; use warnings; #my $h = DBI->connect("dbi:ODBC:baugi","sa","easysoft", # {PrintError => 1, RaiseError => 1, PrintWarn => 1}); my $h = DBI->connect; $h->{PrintError} = $h->{RaiseError} = $h->{PrintWarn} = 1; my $s = $h->prepare(q{select 'frederickfrederick'}); $s->execute; $s->bind_col(1, undef, {TreatAsLOB=>1}); $s->fetch; getit($s, SQL_BINARY); $s = $h->prepare(q{select 'frederickfrederick'}); $s->execute; $s->bind_col(1, undef, {TreatAsLOB=>1}); $s->fetch; # NOTE the difference between receiving something as binary and as a char # ODBC's SQLGetData is defined as putting a terminating NUL chr at the # end of strings so even though we ask for 8 we get 7 bytes getit($s, SQL_CHAR); sub getit{ my ($s, $type) = @_; my $len; while($len = $s->odbc_lob_read(1, \my $x, 8, {TYPE => $type})) { print "len=$len, x=$x, ", length($x), "\n"; } print "len at end = $len\n"; my $x; $len = $s->odbc_lob_read(1, \$x, 8); print "len after read $len\n"; } DBD-ODBC-1.61/examples/temp_table.pl0000644000175000017500000000132212250310263016364 0ustar martinmartin# $Id$ # # To access temporary tables in MS SQL Server they need to be created via # SQLExecDirect # use strict; use warnings; use DBI; my $h = DBI->connect(); eval { $h->do(q{drop table martin}); }; $h->do(q{create table martin (a int)}); $h->do('insert into martin values(1)'); my $s; # this long winded way works: #$s = $h->prepare('select * into #tmp from martin', # { odbc_exec_direct => 1} #); #$s->execute; # and this works too: $h->do('select * into #tmp from martin'); # but a prepare without odbc_exec_direct would not work print "NUM_OF_FIELDS: " . DBI::neat($s->{NUM_OF_FIELDS}), "\n"; $s = $h->selectall_arrayref(q{select * from #tmp}); use Data::Dumper; print Dumper($s), "\n"; DBD-ODBC-1.61/examples/testproc3.pl0000755000175000017500000000216312250310263016205 0ustar martinmartin#!perl -w # $Id$ use strict; use DBI; my $dbh = DBI->connect(); eval {$dbh->do("DROP TABLE table1");}; eval {$dbh->do("CREATE TABLE table1 (i INTEGER)");}; eval {$dbh->do("DROP TABLE table2");}; eval {$dbh->do("CREATE TABLE table2 (i INTEGER)");}; eval {$dbh->do("DROP PROCEDURE proc1");}; eval {$dbh->do("CREATE PROCEDURE proc1 \@inputval int AS ". "INSERT INTO table1 VALUES (\@inputval); " . 
" return \@inputval;");}; if ($@) { print $@, "\n"; } unlink "dbitrace.log" if (-e "dbitrace.log"); $dbh->trace(9, "dbitrace.log"); # Insert a row into table1, either directly or indirectly: my $direct = 0; my $sth1; $sth1 = $dbh->prepare ("{? = call proc1(?) }"); my $output = 0; my $i = 0; while ($i < 4) { # Insert a row into table2 (this fails after an indirect insertion): $sth1->bind_param_inout(1, \$output, 50, DBI::SQL_INTEGER); $sth1->bind_param(2, $i, DBI::SQL_INTEGER); $sth1->execute(); print "$output\n"; $i++; } my $sth = $dbh->prepare("select * from table1"); $sth->execute; my @row; while (@row = $sth->fetchrow_array) { print join(', ', @row), "\n"; } $dbh->disconnect; DBD-ODBC-1.61/examples/proctest1.pl0000755000175000017500000000403412250310263016202 0ustar martinmartin#!perl -w # $Id$ use DBI; use strict; use Data::Dumper; use warnings; my $dbh = DBI->connect(); eval { local $dbh->{PrintError} = 0; $dbh->do("drop procedure PERL_DBD_TESTPRC"); }; $dbh->do("CREATE PROCEDURE PERL_DBD_TESTPRC \@parameter1 int = 22 AS /* SET NOCOUNT ON */ select 1 as some_data select isnull(\@parameter1, 0) as parameter1, 3 as some_more_data print 'kaboom' RETURN(\@parameter1 + 1)"); $dbh->disconnect; sub test { my ($outputTempate, $recurse) = @_; my $queryInputParameter1 = 2222; my $queryOutputParameter = $outputTempate; my $dbh = DBI->connect; local $dbh->{odbc_async_exec} = 1; my $testpass = 0; sub err_handler { my ($state, $msg) = @_; # Strip out all of the driver ID stuff $msg =~ s/^(\[[\w\s]*\])+//; print "===> state: $state msg: $msg\n"; $testpass++; return 0; } local $dbh->{odbc_err_handler} = \&err_handler; my $sth = $dbh->prepare('{? = call PERL_DBD_TESTPRC(?) }'); $sth->bind_param_inout(1, \$queryOutputParameter, 30, { TYPE => DBI::SQL_INTEGER }); $sth->bind_param(2, $queryInputParameter1, { TYPE => DBI::SQL_INTEGER }); $sth->execute(); print '$sth->{Active}: ', $sth->{Active}, "\n"; if (1) { do { for(my $rowRef; $rowRef = $sth->fetchrow_hashref('NAME'); ) { my %outputData = %$rowRef; print 'outputData ', Dumper(\%outputData), "\n"; if($recurse > 0) { test($dbh, --$recurse); } } } while($sth->{odbc_more_results}); } print '$queryOutputParameter: \'', $queryOutputParameter, '\' expected: (', $queryInputParameter1 + 1, ")\n\n"; print "Err handler called $testpass times\n"; } ########################################## ### Test ########################################## unlink("dbitrace.log") if (-e "dbitrace.log"); $dbh->trace(9, "dbitrace.log"); test(0, 0); test(10, 0); test(100, 0); test(' ', 0); test(0, 1); #recusion ########################################## ### Cleanup... 
########################################## $dbh->disconnect; DBD-ODBC-1.61/examples/testver.pl0000644000175000017500000000111112250310263015740 0ustar martinmartin#perl -w # $Id$ use DBI; my $dbh = DBI->connect() or die "$DBI::errstr\n"; my %InfoTests = ( 'SQL_DRIVER_NAME', 6, 'SQL_DRIVER_VER', 7, 'SQL_CURSOR_COMMIT_BEHAVIOR', 23, 'SQL_ALTER_TABLE', 86, 'SQL_ACCESSIBLE_PROCEDURES', 20, ); foreach $SQLInfo (sort keys %InfoTests) { $ret = 0; $ret = $dbh->func($InfoTests{$SQLInfo}, GetInfo); print "$SQLInfo ($InfoTests{$SQLInfo}):\t$ret\n"; } DBI->trace(9,"c:/trace.txt"); eval { print "SQL_ROWSET_SIZE = $dbh->{odbc_SQL_ROWSET_SIZE}\n"; }; eval { print "Driver version = $dbh->{odbc_SQL_DRIVER_ODBC_VER}\n"; }; $dbh->disconnect; DBD-ODBC-1.61/examples/driver_complete.pl0000644000175000017500000000115312250310263017435 0ustar martinmartin# $Id$ # shows how (in Windows) you can set the odbc_driver_complete flag, # pass incomplete connection strings and be prompted for completion use strict; use warnings; use DBI; my $h = DBI->connect('dbi:ODBC:DRIVER={SQL Server}', undef, undef, {odbc_driver_complete => 1}) or die $DBI::errstr; if (defined($h->err)) { if ($h->err eq 0) { print "Warning message : ", $h->errstr, "\n"; } elsif ($h->err eq '') { print "Informational message : ", $h->errstr, "\n"; } } print "Out Connection String: ", $h->{odbc_out_connect_string}, "\n"; print "odbc_driver_complete: ", $h->{odbc_driver_complete}, "\n"; DBD-ODBC-1.61/examples/leakcheck.pl0000755000175000017500000000406012250310263016167 0ustar martinmartin#!perl -w # $Id$ use Devel::Leak; use DBI qw(:sql_types); use strict; my $insert_value = 0; my $long = "a" x 1000; sub connecttest { my $dbh = DBI->connect(); $dbh->disconnect; } sub preparetest { my $dbh = DBI->connect(); $dbh->{LongReadLen} = 800; my $sth=$dbh->prepare("select * from PERL_DBD_TEST"); my @row; $sth->execute; while (@row = $sth->fetchrow_array) { } } sub inserttest ($) { my $delete = shift; my $dbh = DBI->connect(); $dbh->{LongReadLen} = 1500; if ($delete) { $dbh->do("delete from perl_dbd_test"); } my $sth=$dbh->prepare("insert into PERL_DBD_TEST (COL_A, COL_C) values ($insert_value, ?)"); my @row; $sth->bind_param(1, $long, SQL_LONGVARCHAR); $sth->execute; $insert_value++; } sub selecttest { my $dbh = DBI->connect(); $dbh->{LongReadLen} = 1500; my $sth=$dbh->prepare("select COL_A, COL_C FROM PERL_DBD_TEST order by col_a"); my @row; $sth->execute; while (@row = $sth->fetchrow_array) { } } my $handle; my $i =0; my $count; my $count2; my $count3; my $count4; my $count5; my $count6; my $count7; my $count8; $count = Devel::Leak::NoteSV($handle); $i = 0; while ($i < 100) { connecttest; $i++; } $count2 = Devel::Leak::CheckSV($handle); $count2 = Devel::Leak::NoteSV($handle); preparetest; $count3 = Devel::Leak::CheckSV($handle); $count3 = Devel::Leak::NoteSV($handle); $i = 0; while ($i < 100) { preparetest; $i++; } $count4 = Devel::Leak::CheckSV($handle); $count4 = Devel::Leak::NoteSV($handle); inserttest(1); $count5 = Devel::Leak::CheckSV($handle); $count5 = Devel::Leak::NoteSV($handle); $i = 0; while ($i < 100) { inserttest(0); $i++; } $count6 = Devel::Leak::CheckSV($handle); $count6 = Devel::Leak::NoteSV($handle); selecttest; $count7 = Devel::Leak::CheckSV($handle); $count7 = Devel::Leak::NoteSV($handle); $i = 0; while ($i < 100) { selecttest; $i++; } $count8 = Devel::Leak::CheckSV($handle); # $count8 = Devel::Leak::NoteSV($handle); print "$count, $count2, $count3, $count4, $count5, $count6, $count7, $count8\n"; 
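# The odbc_st_execute_for_fetch() routine in dbdimp.c (earlier in this
# distribution) is what services DBI's execute_array/execute_for_fetch for
# this driver. A minimal, hypothetical usage sketch - the DSN, credentials
# and table below are illustrative only and not part of this example:
#
#   use DBI;
#   my $dbh = DBI->connect('dbi:ODBC:DSN=mydsn', $user, $pass,
#                          { RaiseError => 1 });
#   my $sth = $dbh->prepare('insert into mytable (id, name) values (?, ?)');
#   my @tuple_status;
#   my $tuples = $sth->execute_array(
#       { ArrayTupleStatus => \@tuple_status },
#       [ 1, 2, 3 ],
#       [ 'one', 'two', 'three' ],
#   );
#   # each element of @tuple_status is either a per-row status/count or an
#   # [ err, errstr, sqlstate ] arrayref for rows that failed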
DBD-ODBC-1.61/examples/perl-DBD-ODBC.spec0000644000175000017500000000273112250310263016632 0ustar martinmartinName: perl-DBD-ODBC Version: 1.39 Release: 1%{?dist} Summary: ODBC Driver for Perl DBI Group: Development/Libraries License: GPL+ or Artistic URL: https://metacpan.org/module/DBD::ODBC Source0: http://cpan.metacpan.org/authors/id/M/MJ/MJEVANS/DBD-ODBC-1.39.tar.gz BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) # build dependencies from MEYA.yml BuildRequires: perl(ExtUtils::MakeMaker) BuildRequires: perl(DBI) >= 1.609 BuildRequires: perl(Test::Simple) >= 0.90 BuildRequires: unixODBC-devel > 2.2.5 Requires: perl(:MODULE_COMPAT_%(eval "`%{__perl} -V:version`"; echo $version)) Requires: perl(DBI) >= 1.6909 Requires: unixODBC > 2.2.5 %{?perl_default_filter} %description Provides ODBC driver for Perls DBI module. %prep %setup -q -n DBD-ODBC-%{version} %build %{__perl} Makefile.PL INSTALLDIRS=vendor OPTIMIZE="$RPM_OPT_FLAGS" make %{?_smp_mflags} %install rm -rf $RPM_BUILD_ROOT make pure_install PERL_INSTALL_ROOT=$RPM_BUILD_ROOT find $RPM_BUILD_ROOT -type f -name .packlist -exec rm -f {} ';' find $RPM_BUILD_ROOT -type f -name '*.bs' -a -size 0 -exec rm -f {} ';' find $RPM_BUILD_ROOT -depth -type d -exec rmdir {} 2>/dev/null ';' chmod -R u+w $RPM_BUILD_ROOT/* %check make test %clean rm -rf $RPM_BUILD_ROOT %files %defattr(-,root,root,-) %doc Changes README examples/ %{perl_vendorarch}/* %exclude %dir %{perl_vendorarch}/auto/ %{_mandir}/man3/*.3* %changelog * Sat Jul 21 2012 Michiel Beijen 1.0 - Initial RPM releaseDBD-ODBC-1.61/examples/testproc4.pl0000644000175000017500000000172312250310263016204 0ustar martinmartin#!perl -w # $Id$ use strict; use DBI; # Connect to the database, and create a table and stored procedure: my $dbh=DBI->connect("dbi:ODBC:PERL_TEST_SQLSERVER", $ENV{DBI_USER}, $ENV{DBI_PASS}, { RaiseError => 1 }) or die "Can't connect"; eval {$dbh->do("DROP PROCEDURE proc1");}; my $proc1 = "CREATE PROCEDURE proc1 ". " \@MaxOrderID1 int OUTPUT, " . " \@MaxOrderID2 varchar(32) OUTPUT AS " . " SELECT \@MaxOrderid1 = 200 + 100 " . " SELECT \@MaxOrderid2 = '200' + '100' ". " return (0) "; eval {$dbh->do ($proc1);}; if ($@) { print "Error creating procedure.\n$@\n"; } # Execute it: if (-e "dbitrace.log") { unlink("dbitrace.log"); } $dbh->trace(9, "dbitrace.log"); my $sth = $dbh->prepare ("{call proc1(?, ?) }"); my $retValue1; my $retValue2; $sth->bind_param_inout(1,\$retValue1, 32); $sth->bind_param_inout(2,\$retValue2, 32); $sth->execute; print "$retValue1, $retValue2\n"; $dbh->disconnect; DBD-ODBC-1.61/examples/testkeys.pl0000755000175000017500000000160512250310263016132 0ustar martinmartin#!/usr/bin/perl -w -I./t # $Id$ # use strict; use DBI qw(:sql_types); # use DBD::ODBC::Const qw(:sql_types); my (@row); my $dbh = DBI->connect('dbi:ODBC:PERL_TEST_ACCESS', '', '', {PrintError=>1}) or exit(0); # ------------------------------------------------------------ my @tables; my $table; my $sth; $| = 1; if (@tables = $dbh->tables) { # print join(', ', @tables), "\n"; foreach $table (@tables) { my $schema = ''; if ($table =~ m/(.*)\.(.*)$/) { $schema = $1; $table = $2; } # DBI->trace(3); $sth = $dbh->func('', $schema, $table, GetPrimaryKeys); if (!$sth) { print "No Primary keys for $schema.$table (", $dbh->errstr, ")\n"; } else { print "$table\n"; my @row; while (@row = $sth->fetchrow_array) { print "\t", join(', ', @row), "\n"; } } } } $dbh->disconnect(); sub nullif ($) { my $val = shift; $val ? 
$val : "(null)"; }DBD-ODBC-1.61/examples/testcrtable.pl0000644000175000017500000000634112250310263016572 0ustar martinmartin#!/usr/bin/perl -w -I./t # $Id$ use strict; use DBI qw(:sql_types); my (@row); my $dbh = DBI->connect() or exit(0); # ------------------------------------------------------------ my %TypeTests = ( 'SQL_ALL_TYPES' => 0, 'SQL_VARCHAR' => SQL_VARCHAR, 'SQL_CHAR' => SQL_CHAR, 'SQL_INTEGER' => SQL_INTEGER, 'SQL_SMALLINT' => SQL_SMALLINT, 'SQL_NUMERIC' => SQL_NUMERIC, 'SQL_LONGVARCHAR' => SQL_LONGVARCHAR, 'SQL_LONGVARBINARY' => SQL_LONGVARBINARY, ); my $ret; print "\nInformation for DBI_DSN=$ENV{'DBI_DSN'}\n\t", $dbh->get_info(17), "\n"; my $SQLInfo; print "Listing all types\n"; my $sql = "create table PERL_TEST (\n"; my $icolno = 0; use constant { gti_name => 0, gti_type => 1, gti_column_size => 2, gti_prefix => 3, gti_suffix => 4, gti_create_params => 5, gti_nullable => 6 }; my $sth = $dbh->func(0, 'GetTypeInfo'); if ($sth) { my $colcount = $sth->func(1, 0, 'ColAttributes'); # 1 for col (unused) 0 for SQL_COLUMN_COUNT # print "Column count is $colcount\n"; my $i; my @coldescs = (); # column 0 should be an error/blank for ($i = 0; $i <= $colcount; $i++) { my $stype = $sth->func($i, 2, 'ColAttributes'); my $sname = $sth->func($i, 1, 'ColAttributes'); push(@coldescs, $sname); } my @cols = (); my $seen_identity; while (@row = $sth->fetchrow()) { print "$row[gti_name]| ", nullif($row[gti_type]), "| ", nullif($row[gti_column_size]), "| ", nullif($row[gti_prefix]), "| ", nullif($row[gti_suffix]), "| ", nullif($row[gti_create_params]), "| ", nullif($row[gti_nullable]), "| ", "\n"; if ($row[gti_name] =~ /identity/) { next if $seen_identity; # you cannot have multiple identity columns $seen_identity = 1; } if (!($row[gti_name] =~ /auto/)) { my $tmp = " COL_$icolno $row[gti_name]"; if (defined($row[gti_create_params]) && ($row[gti_create_params] =~ /length/ or $row[gti_create_params] =~ /precision/)) { if ($row[gti_name] =~ /\(\)/) { $tmp =~ s/\(\)/($row[gti_column_size])/; } else { $tmp .= "(10)"; #"($row[gti_column_size])" } } push(@cols, $tmp); } $icolno++; } $sql .= join("\n , ", @cols) . ")\n"; $sth->finish; } print $sql; eval { $dbh->do("drop table PERL_TEST"); }; $dbh->do($sql); my @tables = $dbh->tables; my @mtable = grep(/PERL_TEST/, @tables); my ($catalog, $schema, $table) = split(/\./, $mtable[0]); $catalog =~ s/"//g; $schema =~ s/"//g; $table =~ s/"//g; #$table="PERL_DBD_TEST"; print "Getting column info for: $catalog, $schema, $table\n"; my $sth = $dbh->column_info(undef, undef, $table, undef); my @row; print join(', ', @{$sth->{NAME}}), "\n"; while (@row = $sth->fetchrow_array) { # join prints nasty warning messages with -w. There's gotta be a better way... foreach (@row) { $_ = "" if (!defined); } print join(", ", @row), "\n"; } $dbh->disconnect(); sub nullif { my $val = shift; $val ? 
$val : "(null)"; } DBD-ODBC-1.61/examples/testmoney.pl0000644000175000017500000000521112250310263016300 0ustar martinmartin#!perl -w # $Id$ use strict; use DBI; sub printtable($) { my $dbh = shift; my $sthread = $dbh->prepare("select TypeName, ProvLevel1, ProvLevel2, Action from perl_test_dbd1 order by typename"); $sthread->execute; my @row; while (@row = $sthread->fetchrow_array) { print join(', ', @row), "\n"; } print "-----\n"; } my $dbh = DBI->connect(); $dbh->{RaiseError} = 1; $dbh->{PrintError} = 0; $dbh -> {LongReadLen} = 100000; $dbh -> {LongTruncOk} = 0; eval {$dbh->do("DROP TABLE perl_test_dbd1");}; eval {$dbh->do("DROP TABLE perl_test_dbd2");}; $dbh->do("CREATE TABLE perl_test_dbd1 (" . " [TypeName] [varchar] (50) NOT NULL ," . " [ProvLevel1] [money] NOT NULL ," . " [ProvLevel2] [money] NOT NULL , " . "[Action] [tinyint] NOT NULL) "); $dbh->do("ALTER TABLE perl_test_dbd1 WITH NOCHECK ADD" . " CONSTRAINT [PK_Test1] PRIMARY KEY CLUSTERED" . " ([TypeName])"); $dbh->do("ALTER TABLE perl_test_dbd1 WITH NOCHECK ADD" . " CONSTRAINT [DF_Test1_ProvLevel1] DEFAULT (0.0000) FOR [ProvLevel1]," . " CONSTRAINT [DF_Test1_ProvLevel2] DEFAULT (0.0000) FOR [ProvLevel2]," . " CONSTRAINT [DF_Test1_Action] DEFAULT (0) FOR [Action]"); $dbh->do("CREATE TABLE perl_test_dbd2 (i INTEGER)"); unlink "dbitrace.log" if (-e "dbitrace.log"); $dbh->trace(9, "dbitrace.log"); # Insert a row into table1, either directly or indirectly: my $direct = 0; # check do first. $dbh->do("INSERT INTO Perl_Test_Dbd1 (TypeName,ProvLevel1,ProvLevel2,Action) VALUES ('A',CONVERT(money,0),CONVERT(money,0),0)"); printtable($dbh); my @types = ('B', 'C'); my $typename; my $sth = $dbh->prepare("INSERT INTO Perl_Test_Dbd1 (TypeName,ProvLevel1,ProvLevel2,Action) VALUES (?,0,0,0)"); foreach $typename (@types) { $sth->execute($typename); } printtable($dbh); my @types1 = ('D', 'E'); my @values1_1 = ("9.33", "1,323.01"); my @values1_2 = ("10.33", "1,324.01"); my $i = 0; $sth = $dbh->prepare("INSERT INTO Perl_Test_Dbd1 (TypeName,ProvLevel1,ProvLevel2,Action) VALUES (?,CONVERT(money,?),CONVERT(money,?),0)"); for ($i = 0; $i < @types1; $i++) { $sth->execute($types1[$i], $values1_1[$i], $values1_2[$i]); } printtable($dbh); my @types2 = ('A', 'B', 'C', 'D', 'E'); my @values2_1 = ("1.33", "1,333", "42", "53", "52"); my @values2_2 = ("2.33", "1,324.01", "234", "232", "220"); $i = 0; $sth = $dbh->prepare("update Perl_Test_Dbd1 SET Provlevel1=CONVERT(money,?), provlevel2=CONVERT(money,?) where TypeName=?"); for ($i = 0; $i < @types2; $i++) { $sth->execute($values2_1[$i], $values2_2[$i], $types2[$i]); } printtable($dbh); $dbh->disconnect; DBD-ODBC-1.61/examples/testdestruction.pl0000755000175000017500000000433512250310263017525 0ustar martinmartin#!perl -w # $Id$ use strict; use Getopt::Std; use DBI qw(:sql_types); my $usage = "perl dbtest.pl [-b]. 
-b binds parameters explicitly.\n"; my @data = ( ["2001-01-01 01:01:01.111", "a" x 12], # "aaaaaaaaaaaa" ["2002-02-02 02:02:02.222", "b" x 114], ["2003-03-03 03:03:03.333", "c" x 251], ["2004-04-04 04:04:04.444", "d" x 282], ["2005-05-05 05:05:05.555", "e" x 131] ); # Get command line options: my %args; getopts ("b", \%args) or die $usage; my $bind = $args{"b"}; # Connect to the database and create the table: my $dbh=DBI->connect() or die "Can't connect"; $dbh->{RaiseError} = 1; $dbh->{LongReadLen} = 800; eval { $dbh->do("DROP TABLE foo"); }; my @types = (SQL_TYPE_TIMESTAMP, SQL_TIMESTAMP); my $type; my @row; foreach $type (@types) { my $sth = $dbh->func($type, "GetTypeInfo"); if ($sth) { @row = $sth->fetchrow(); $sth->finish(); last if @row; } else { # warn "Unable to get type for type $type\n"; } } die "Unable to find a suitable test type for date field\n" unless @row; my $dbname = $dbh->get_info(17); # sql_dbms_name my $datetype = $row[0]; print "Date type = $datetype\n"; $dbh->do("CREATE TABLE foo (i INTEGER, time $datetype, str VARCHAR(4000))"); # Insert records into the database: my $sth1 = $dbh->prepare("INSERT INTO FOO (i,time,str) values (?,?,?)"); for (my $i=0; $i<@data; $i++) { my ($time,$str) = @{$data[$i]}; print "Inserting: $i, $time, string length ".length($str)."\n"; if ($bind) { $sth1->bind_param (1, $i, SQL_INTEGER); $sth1->bind_param (2, $time, SQL_TIMESTAMP); $sth1->bind_param (3, $str, SQL_LONGVARCHAR); $sth1->execute or die ($DBI::errstr); } else { $sth1->execute ($i, $time, $str) or die ($DBI::errstr); } } print "\n"; # Retrieve records from the database, and see if they match original data: my $sth2 = $dbh->prepare("SELECT i,time,str FROM foo"); $sth2->execute or die ($DBI::errstr); while (my ($i,$time,$str) = $sth2->fetchrow_array()) { print "Retrieving: $i, $time, string length ".length($str)."\t"; print "!time " if ($time ne $data[$i][0]); print "!string" if ($str ne $data[$i][1]); print "\n"; } $dbh->disconnect; $dbh = undef; $sth2 = undef; DBD-ODBC-1.61/examples/testconn.pl0000755000175000017500000000444612250310263016122 0ustar martinmartin#!/usr/bin/perl -w # $Id$ use strict; use DBI; my ($sid,$user,$passwd) = ('ZDB','TEST','TEST'); # My little helper function, call with: # The DBI database handle, The SQL code and the parameters. # It will then return the result of the execute (or $sth if it's a dataset). 
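# A hypothetical call, purely for illustration (the SQL and parameter are
# not part of this script):
#   my $sth = sql($db, 'select name from some_table where id = ?', 42);
#   my ($name) = $sth->fetchrow_array if $sth;
# On a prepare/execute failure it prints the SQL plus the parameters and
# returns undef; on success it returns the statement handle (or the execute
# result when that is an array reference).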
sub sql { my $db = shift; my $code = shift; my @param = @_; chomp $code; # No good reason to have a tailing \n my $sth = $db->prepare_cached($code); if (!$sth) { print "Unable to prepare: ".$DBI::errstr."\nSQL: $code\n"; return undef; } my $res = $sth->execute(@param); if (!$res) { my $par = ''; foreach my $i (@param) { if (defined $i) { $par .= "'$i',"; } else { $par .= "undef,"; } } chop $par; print "Unable to execute: ".$DBI::errstr."\nSQL: $code\nPAR: $par\n"; return undef; } if (ref($res) eq 'ARRAY') { return $res; } else { return $sth; } } sub conn() { # print STDERR "Connecting (to $sid as $user/$passwd)..."; my $db = DBI->connect_cached("dbi:SAP_DB:$sid", $user, $passwd, { AutoCommit=>0, LongReadLen=>10000, } ); unless ($db) { die "Failed to connect to the database $sid as user $user with password $passwd: ".$DBI::errstr; } # print STDERR "Done.\n"; return $db; } sub status($) { my $line = shift; open STATUS, ") { if ($l =~ /^([^:]+):\s+(\S+)/) { $status{$1} = $2; } } close STATUS; return $status{$line}; } my @leak; for (my $i=10; $i<200; $i++) { my $c1 = status('VmSize'); my $db = conn(); print "$db\n"; for (my $j=3;$j<10;$j++) { for (my $k=0;$k<2;$k++) { my $res = sql($db,"select $i+$j from dual"); $res->fetchrow_array; } # my $q1 = status('VmSize'); for (my $k=0;$k<30;$k++) { my $res = sql($db,"select $i+$j from dual"); my ($r) = $res->fetchrow_array; die if $i+$j != $r; } # my $q2 = status('VmSize'); # print STDERR "Query $j leaked: ".($q2-$q1)." kB\n"; } $db->rollback; # $db->disconnect; $db = undef; my $c2 = status('VmSize'); print "Connection $i leaked: ".($c2-$c1)." kB\n"; print "Sleeping for ".(600*$i/60)." minutes\n"; for(my $j=0;$j<60*$i;$j++) { sleep(10); print STDERR "."; } } DBD-ODBC-1.61/examples/sqlserver_supplementary_chrs.pl0000644000175000017500000000301612250310263022307 0ustar martinmartin# $Id$ # # Quick example demonstrating you can insert and retrieve # supplementary characters from MS SQL Server 2012 - it won't work before this version # # See http://msdn.microsoft.com/en-us/library/ms143726.aspx # http://msdn.microsoft.com/en-us/library/bb330962(v=sql.90).aspx # use strict; use warnings; use DBI; use Unicode::UCD 'charinfo'; use Data::Dumper; #use charnames ':full'; use Test::More; use Test::More::UTF8; binmode(STDOUT, ":encoding(UTF-8)"); binmode(STDERR, ":encoding(UTF-8)"); # unicode chr above FFFF meaning it needs a surrogate pair my $char = "\x{2317F}"; my $charinfo = charinfo(0x2317F); print Dumper($charinfo); #print "0x2317F is : ", charnames::viacode(0x2317F), "\n"; my $h = DBI->connect() or BAIL_OUT("Failed to connect"); BAIL_OUT("Not a unicode build of DBD::ODBC") if !$h->{odbc_has_unicode}; $h->{RaiseError} = 1; $h->{ChopBlanks} = 1; $h->{RaiseError} = 1; eval { $h->do('drop table mje'); }; # create table ensuring collation specifieds _SC # for supplementary characters. 
$h->do(q/create table mje (a nchar(20) collate Latin1_General_100_CI_AI_SC)/); my $s = $h->prepare(q/insert into mje values(?)/); my $inserted = $s->execute("\x{2317F}"); is($inserted, 1, "inserted one row"); my $r = $h->selectall_arrayref(q/select a, len(a), unicode(a), datalength(a) from mje/); print Dumper($r); print "Ordinals of received/sent: ", ord($r->[0][0]), ", ", ord($char), "\n"; print DBI::data_diff($r->[0][0], $char); is($r->[0][0], $char); is($r->[0][1], 1); is($r->[0][2], 143743); done_testing; DBD-ODBC-1.61/examples/Test_Dates_Jun19.pl0000644000175000017500000000642412250310263017305 0ustar martinmartin##!/usr/bin/perl -w use strict; # ------------------------------------------------------------------------ use DBI; print "Program $0 now starting \n"; # ################### Build DSN Less MSSQL Connection Parameters #################################################### # my $DSN = 'driver={SQL Server};Server=markchar; database=orders; uid=orderguy; pwd=element;'; my $dbh = DBI->connect() or die "Can't connect to databese ", DBI::errstr," \n"; ################################################################################################## print "We have connected successfully to the Database \n"; $dbh->{RaiseError} = 1; # let DBI handle the call to die eval { $dbh->do("drop table PERL_DBD_TEST"); $dbh->do("create table PERL_DBD_TEST ( [OrderID] [int] IDENTITY (1, 1) NOT NULL , [CustomerID] [nchar] (5) COLLATE SQL_Latin1_General_CP1_CI_AS NULL , [EmployeeID] [int] NULL , [OrderDate] [datetime] NULL , [RequiredDate] [datetime] NULL , [ShippedDate] [datetime] NULL , [ShipVia] [int] NULL , [Freight] [money] NULL , [ShipName] [nvarchar] (40) COLLATE SQL_Latin1_General_CP1_CI_AS NULL , [ShipAddress] [nvarchar] (60) COLLATE SQL_Latin1_General_CP1_CI_AS NULL , [ShipCity] [nvarchar] (15) COLLATE SQL_Latin1_General_CP1_CI_AS NULL , [ShipRegion] [nvarchar] (15) COLLATE SQL_Latin1_General_CP1_CI_AS NULL , [ShipPostalCode] [nvarchar] (10) COLLATE SQL_Latin1_General_CP1_CI_AS NULL , [ShipCountry] [nvarchar] (15) COLLATE SQL_Latin1_General_CP1_CI_AS NULL )"); }; # Jeff... 
IF you comment this out, you should see the following error # $dbh->{odbc_default_bind_type} = 12; # SQL_VARCHAR for # May 18, 2003 compatibility with older DBD::ODBC # $dbh->{odbc_default_bind_type} = 0; # **DEFAULT won't work here*** # DBD::ODBC::st execute failed: [Microsoft][ODBC SQL Server Driver]Invalid character value for cast specification (SQL-22018)(DBD: st_execute/SQLExecute err=-1) # DBD::ODBC::st execute failed: [Microsoft][ODBC SQL Server Driver]Invalid character value for cast specification (SQL-22018)(DBD: st_execute/SQLExecute err=-1) # # # # # # # # # Prepare the Insert into Order Table Statement # # # # # # # # # # # my $insert_order_stm = $dbh->prepare ( " INSERT INTO PERL_DBD_TEST ( CustomerID, EmployeeID, OrderDate, RequiredDate, ShippedDate, ShipVia, ShipName, ShipAddress, ShipCity, ShipRegion, ShipPostalCode, ShipCountry ) VALUES (?,?,?,?,?,?,?,?,?,?,?,?)" ); # $dbh->{odbc_default_bind_type} = 0; # SQL_VARCHAR for $insert_order_stm->bind_param(1, 0001); $insert_order_stm->bind_param(2, 9); $insert_order_stm->bind_param(3, "{d '2003-05-16'}" ); $insert_order_stm->bind_param(4, "{d '2003-06-25'}" ); $insert_order_stm->bind_param(5, "{d '2003-06-22'}" ); $insert_order_stm->bind_param(6, 1); $insert_order_stm->bind_param(7, "Cust1"); $insert_order_stm->bind_param(8, "addr 1"); $insert_order_stm->bind_param(9, "city"); $insert_order_stm->bind_param(10, "region"); $insert_order_stm->bind_param(11, "999"); $insert_order_stm->bind_param(12, "USA"); my $rc = $insert_order_stm->execute; print "Last SQL Return Code from insert to Order Table = $rc \n" ; print "Program $0 now ending \n"; $dbh->disconnect; DBD-ODBC-1.61/examples/set_nocount_on.pl0000644000175000017500000000176612250310263017320 0ustar martinmartin# $Id$ # shows what happens in MS SQL Server when set nocount on is set # according to MS setting it on reduces network traffic since a count # off the affected/selected rows is not returned to the driver. 
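# Typical output from the two updates at the end of this script (exact
# behaviour depends on your driver):
#   Rows affected: 5
#   Rows affected: -1   (-1 is how DBI reports "unknown" when no count comes back)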
use strict; use warnings; use DBI; my $h = DBI->connect() or die $DBI::errstr; if (defined($h->err)) { if ($h->err eq 0) { print "Warning message : ", $h->errstr, "\n"; } elsif ($h->err eq '') { print "Informational message : ", $h->errstr, "\n"; } } print "Out Connection String: ", $h->{odbc_out_connect_string}, "\n"; eval { local $h->{PrintError} = 0; $h->do(q/drop table nocount_test/); }; $h->do(q/create table nocount_test (a integer)/); my $s = $h->prepare(q/insert into nocount_test values(?)/); foreach (1..5) { $s->execute($_); } $s = $h->prepare(q/update nocount_test set a = a + 1/); $s->execute; print "Rows affected: ", $s->rows, "\n";; $s = $h->prepare(q/set nocount on;update nocount_test set a = a + 1/); $s->execute; print "Rows affected: ", $s->rows, "\n";; DBD-ODBC-1.61/examples/getinfo.pl0000644000175000017500000001401312250310263015704 0ustar martinmartin#perl -w # $Id$ use DBI; my $dbh = DBI->connect() or die "$DBI::errstr\n"; my %InfoTests = ( 'SQL_MAX_DRIVER_CONNECTTIONS', 0, 'SQL_MAX_CONCURRENT_ACTIVITIES', 1, 'SQL_DATA_SOURCE_NAME', 2, 'SQL_DRIVER_NAME', 6, 'SQL_DRIVER_VER', 7, 'SQL_FETCH_DIRECTION', 8, 'SQL_ODBC_API_CONFORMANCE', 9, 'SQL_ODBC_VER', 10, 'SQL_ROW_UPDATES', 11, 'SQL_ODBC_SAG_CLI_CONFORMANCE', 12, 'SQL_SERVER_NAME', 13, 'SQL_SEARCH_PATTERN_ESCAPE', 14, 'SQL_ODBC_SQL_CONFORMANCE', 15, 'SQL_DBMS_NAME', 17, 'SQL_DBMS_VER', 18, 'SQL_ACCESSIBLE_PROCEDURES', 20, 'SQK_PROCEDURES', 21, 'SQL_CONCAT_NULL_BEHAVIOR', 22, 'SQL_CURSOR_COMMIT_BEHAVIOR', 23, 'SQL_CURSOR_ROLLBACK_BEHAVIOR', 24, 'SQL_DATA_SOURCE_READ_ONLT', 25, 'SQL_DEFAULT_TXN_ISOLATION', 26, 'SQL_EXPRESSIONS_IN_ORDERBY', 27, 'SQL_IDENTIFIER_CASE', 28, 'SQL_IDENTIFIER_QUOTE_CHAR', 29, 'SQL_MAX_COLUMN_NAME_LEN', 30, 'SQL_MAX_CURSOR_NAME_LEN', 31, 'SQL_MAX_SCHEMA_NAME_LEN', 32, 'SQL_MAX_PROCEDURE_NAME_LEN', 33, 'SQL_MAX_CATALOG_NAME_LENGTH', 34, 'SQL_MAX_TABLE_NAME_LEN', 35, 'SQL_MULT_RESULT_SETS', 36, 'SQL_MULTIPLE_ACTIVE_TXN', 37, 'SQL_OUTER_JOINS', 38, 'SQL_OWNER_TERM', 39, 'SQL_PROCEDURE_TERM', 40, 'SQL_QUALIFIER_NAME_SEPARATOR', 41, 'SQL_QUALIFIER_TERM', 42, 'SQL_SCROLL_CONCURRENCY', 43, 'SQL_SCROLL_OPTIONS', 44, 'SQL_TABLE_TERM', 45, 'SQL_TXN_CAPABLE', 46, 'SQL_USER_NAME', 47, 'SQL_CONVERT_FUNCTIONS', 48, 'SQL_NUMERIC_FUNCTIONS', 49, 'SQL_STRING_FUNCTIONS', 50, 'SQL_SYSTEMS_FUNCTIONS', 51, 'SQL_TIMEDATE_FUNCTIONS', 52, 'SQL_CONVERT_BIGINT', 53, 'SQL_CONVERT_BINARY', 54, 'SQL_CONVERT_BIT', 55, 'SQL_CONVERT_CHAR', 56, 'SQL_CONVERT_DATE', 57, 'SQL_CONVERT_DECIMAL', 58, 'SQL_CONVERT_DOUBLE', 59, 'SQL_CONVERT_FLOAT', 60, 'SQL_CONVERT_INTEGER', 61, 'SQL_CONVERT_LONGVARCHAR', 62, 'SQL_CONVERT_NUMERIC', 63, 'SQL_CONVERT_REAL', 64, 'SQL_CONVERT_SMALLINT', 65, 'SQL_CONVERT_TIME', 66, 'SQL_CONVERT_TIMESTAMP', 67, 'SQL_CONVERT_TINYINT', 68, 'SQL_CONVERT_VARBINARY', 69, 'SQL_CONVERT_VARCHAR', 70, 'SQL_CONVERT_LONGVARBINARY', 71, 'SQL_TXN_ISOLATION_OPTION', 72, 'SQL_INTEGRITY', 73, 'SQL_CORRELATION_NAME', 74, 'SQL_NON_NULLABLE_COLUMNS', 75, 'SQL_DRIVER_ODBC_VER', 77, 'SQL_LOCK_TYPES', 78, 'SQL_POS_OPERATIONS', 79, 'SQL_POSITIONED_STATEMENTS', 80, 'SQL_GETDATA_EXTENSIONS', 81, 'SQL_BOOKMARK_PERSISTENCE', 82, 'SQL_STATIC_SENSITIVITY', 83, 'SQL_FILE_USAGE', 84, 'SQL_NULL_COLLATION', 85, 'SQL_ALTER_TABLE', 86, 'SQL_COLUMN_ALIAS', 87, 'SQL_GROUP_BY', 88, 'SQL_KEYWORDS', 89, 'SQL_ORDER_BY_COLUMNS_IN_SELECT', 90, 'SQL_OWNER_USAGE', 91, 'SQL_QUALIFIER_USAGE', 92, 'SQL_QUOTED_IDENTIFIER_CASE', 93, 'SQL_SPECIAL_CHARACTERS', 94, 'SQL_SUBQUERIES', 95, 'SQL_UNION', 96, 'SQL_MAX_COLUMNS_IN_GROUP_BY', 97, 'SQL_MAX_COLUMNS_IN_INDEX', 
98, 'SQL_MAX_COLUMNS_IN_ORDER_BY', 99, 'SQL_MAX_COLUMNS_IN_SELECT', 100, 'SQL_MAX_COLUMNS_IN_TABLE', 101, 'SQL_MAX_INDEX_SIZE', 102, 'SQL_MAX_ROW_SIZE_INCLUDES_LONG', 103, 'SQL_MAX_ROW_SIZE', 104, 'SQL_MAX_STATEMENT_LEN', 105, 'SQL_MAX_TABLES_IN_SELECT', 106, 'SQL_MAX_USER_NAME_LENGTH', 107, 'SQL_MAX_CHAR_LITERAL_LEN', 108, 'SQL_TIMEDATE_ADD_INTERVALS', 109, 'SQL_TIMEDATE_DIFF_INTERVALS', 110, 'SQL_NEED_LONG_DATA_LEN', 111, 'SQL_MAX_BINARY_LITERAL_LEN', 112, 'SQL_LIKE_ESCAPE_CLAUSE', 113, 'SQL_QUALIFIER_LOCATION', 114, 'SQL_OJ_CAPABILITIES', 115, 'SQL_ACTIVE_ENVIRONMENTS', 116, 'SQL_ALTER_DOMAIN', 117, 'SQL_SQL_CONFORMANCE', 118, 'SQL_DATETIME_LITERALS', 119, 'SQL_BATCH_ROW_COUNT', 120, 'SQL_BATCH_SUPPORT', 121, 'SQL_CONVERT_WCHAR', 122, 'SQL_CONVERT_INTERVAL_DAY_TIME', 123, 'SQL_CONVERT_INTERVAL_YEAR_MONTH', 124, 'SQL_CONVERT_WLONGVARCHAR', 125, 'SQL_CONVERT_WVARCHAR', 126, 'SQL_CREATE_ASSERTION', 127, 'SQL_CREATE_CHARARCTER_SET', 128, 'SQL_CREATE_COLLATION', 129, 'SQL_CREATE_DOMAIN', 130, 'SQL_CREATE_SCHEMA', 131, 'SQL_CREATE_TABLE', 132, 'SQL_CREATE_TRANSLATION', 133, 'SQL_CREATE_VIEW', 134, 'SQL_DROP_ASSERTION', 136, 'SQL_DROP_CHARACTER_SET', 137, 'SQL_DROP_COLLATION', 138, 'SQL_DROP_DOMAIN', 139, 'SQL_DROP_SCHEMA', 140, 'SQL_DROP_TABLE', 141, 'SQL_DROP_TRANSLATION', 142, 'SQL_DROP_VIEW', 143, 'SQL_DYNAMIC_CURSOR_ATTRIBUTES1', 144, 'SQL_DYNAMIC_CURSOR_ATTRIBUTES2', 145, 'SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES1', 146, 'SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES2', 147, 'SQL_INDEX_KEYWORDS', 148, 'SQL_INFO_SCHEMA_VIEWS', 149, 'SQL_KEYSET_CURSOR_ATTRIBUTES1', 150, 'SQL_KEYSET_CURSOR_ATTRIBUTES2', 151, 'SQL_ODBC_INTERFACE_CONFORMANCE', 152, 'SQL_PARAM_ARRAY_ROW_COUNTS', 153, 'SQL_PARAM_ARRAY_SELECTS', 154, 'SQL_SQL92_DATETIME_FUNCTIONS', 155, 'SQL_SQL92_FOREIGN_KEY_DELETE_RULE', 156, 'SQL_SQL92_FOREIGN_KEY_UPDATE_RULE', 157, 'SQL_SQL92_GRANT', 158, 'SQL_SQL92_NUMERIC_VALUE_FUNCTIONS', 159, 'SQL_SQL92_PREDICATES', 160, 'SQL_SQL92_RELATIONAL_JOIN_OPERATORS', 161, 'SQL_SQL92_REMOKE', 162, 'SQL_SQL92_ROW_VALUE_CONSTRUCTOR', 163, 'SQL_SQL92_STRING_FUNCTIONS', 164, 'SQL_SQL92_VALUE_EXPRESSIONS', 165, 'SQL_STANDARD_CLI_CONFORMANCE', 166, 'SQL_STATUC_CURSOR_ATTRIBUTES1', 167, 'SQL_STATUC_CURSOR_ATTRIBUTES2', 168, 'SQL_AGGREGATE_FUNCTIONS', 169, 'SQL_DDL_INDEX', 170, 'SQL_DM_VER', 171, 'SQL_DTC_TRANSITION_COST', 1750, 'SQL_XOPEN_CLI_YEAR', 10000, 'SQL_CURSOR_SENSITIVITY', 10001, 'SQL_DESCRIBE_PARAMETER', 10002, 'SQL_CATALOG_NAME', 10003, 'SQL_COLLATION_SEQ', 10004, 'SQL_MAX_IDENTIFIER_LEN', 10005, 'SQL_ASYNC_MODE', 10021, 'SQL_MAX_ASYNC_CONCURRENT_STATEMENTS', 10022, ); foreach $SQLInfo (sort keys %InfoTests) { $ret = 0; $ret = $dbh->func($InfoTests{$SQLInfo}, GetInfo); print "$SQLInfo ($InfoTests{$SQLInfo}):\t$ret\n"; } $dbh->disconnect; DBD-ODBC-1.61/examples/testspmulti.pl0000755000175000017500000000177112250310263016660 0ustar martinmartin#!perl.exe -w # $Id$ use strict; use DBI; my ($instance, $user, $password, $db) = ('gaccardo\test', 'sa', 'gaccardo', 'testdb'); my $dbh = DBI->connect("dbi:ODBC:PERL_TEST_SQLSERVER", $ENV{DBI_USER}, $ENV{DBI_PASS}, {RaiseError => 1, PrintError => 0}) or die "\n\nCannot connect.\n\n$DBI::errstr\n"; $dbh->{LongReadLen} = 65536; unlink 'dbitrace.log' if (-e 'dbitrace.log') ; DBI->trace(9, 'dbitrace.log'); my @tables = $dbh->tables(); # print "Tables: ", join(', ', @tables), "\n"; my $table; foreach $table (@tables) { # $table =~ s/^"/[/; # $table =~ s/"\./]./; # $table print "$table: \n"; my $sth = $dbh->prepare("exec sp_depends '$table'"); eval { $sth->execute(); }; if 
(!$@) { do { my @query_results; while (@query_results = $sth->fetchrow_array) { print join (', ', @query_results) . "\n"; } } while ( $sth->{odbc_more_results} ); if ($DBI::err) { print "\n$DBI::errstr\n " } } else { print "$@\n"; } }DBD-ODBC-1.61/examples/randombind.pl0000755000175000017500000000552412250310263016400 0ustar martinmartin#!perl -w # $Id$ use strict; use DBI qw(:sql_types); my $dbh=DBI->connect() or die "Can't connect"; $dbh->{RaiseError} = 1; $dbh->{LongReadLen} = 800; eval { $dbh->do("drop table foo"); }; my $dbname = $dbh->get_info(17); # sql_dbms_name my $txttype = "varchar(4000)"; $txttype = "TEXT" if ($dbname =~ /ACCESS/) ; $dbh->do("Create table foo (id integer not null primary key, txt $txttype)"); my $sth = $dbh->prepare("INSERT INTO FOO (ID, TXT) values (?, ?)"); my $sth2 = $dbh->prepare("select id, txt from foo where id = ?"); my @txtinserted; my @lengths = ( 4, 4, 7, 7, 7, 7, 7, 7, 12, 12, 12, 7, 7, 7, 7, 7, 7, 7, 7, 7, 88, 7, 7, 7, 100, 100, 12, 7, 183, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 114, 251, 282, 281, 276, 131, 284, 144, 131, 144, 144, 131, 284, 144, 251, 284, 144, 284, 3, 284, 276, 284, 276, 3, 284, 144, 284, 7, 131, 144, 284, 284, 276, 131, 131, 114, 122 ); my $tmp; my $longstr = "abcdefghijklmnopqrstuvwxyz"; my $i = 0; while ($i < 10) { $longstr .= $longstr; $i++; } $i = 0; while ($i <= $#lengths) { $tmp = substr($longstr, $i, $lengths[$i]); die "substr error? $tmp, $lengths[$i]\n" unless length($tmp) == $lengths[$i]; push(@txtinserted, $tmp); if (1) { $sth->bind_param(1, $i, SQL_INTEGER); $sth->bind_param(2, $tmp, SQL_LONGVARCHAR); $sth->execute; } else { $sth->execute($i, $tmp); } # print "$i: $lengths[$i]\n"; $i++; } print "Inserted $i records.\n"; $i = 0; while ($i <= $#lengths) { if (length($txtinserted[$i]) != $lengths[$i]) { print "Test Mismatch @ $i, $txtinserted[$i] != $lengths[$i]\n"; } $sth2->execute($i); my @row = $sth2->fetchrow_array(); $sth2->finish; print "Checking row $row[0]\n"; if ($txtinserted[$i] ne $row[1]) { print "Mismatch @ $i, ", length($txtinserted[$i]), " != ", length($row[1]), ": \n", $txtinserted[$i], "\n$row[0]\n"; } # print "$i: $txtinserted[$i]\n"; $i++; } print "Checked $i records\n"; $dbh->disconnect; DBD-ODBC-1.61/examples/sqltmptabs.pl0000644000175000017500000000132212250310263016442 0ustar martinmartinuse DBI; # $Id$ # For MS SQL Server temp tables are only visible if you create them with "do" my $dbh = DBI->connect($ENV{DBI_DSN}, $ENV{DBI_USER}, $ENV{DBI_PASS}, { RaiseError => 1}); my $sth; my $sql = 'CREATE TABLE #foo (id INT PRIMARY KEY, val CHAR(4))'; $dbh->do($sql); # $sth = $dbh->prepare($sql); # $sth->execute; # $sth->finish; print "Now inserting!\n"; $sth = $dbh->prepare("INSERT INTO #foo (id, val) VALUES (?, ?)"); my $sth2 = $dbh->prepare("INSERT INTO #foo (id, val) VALUES (?, ?)"); $sth2->execute(1, 'foo'); $sth2->execute(2, 'bar'); $sth = $dbh->prepare("Select id, val from #foo"); $sth->execute; my @row; while (@row = $sth->fetchrow_array) { print join(', ', @row), "\n"; } $dbh->disconnect; DBD-ODBC-1.61/examples/money_test.cgi0000644000175000017500000000737412250310263016602 0ustar martinmartin#!c:/programme/perl/bin/perl.exe -w # $Id$ use strict; # activate for programming/debugging only use warnings; # activate for programming/debugging only use CGI::Carp qw(fatalsToBrowser); # activate for programming/debugging only use CGI 'param','redirect'; use DBI; # config variables our $dsn='dsn'; # Database DSN our $dbuser='user'; # 
Database User our $dbpass='pass'; # Database Password our $dbtable='test1'; # ProvTable ################################################################################################# $|=1; my $dbh = DBI->connect("DBI:ODBC:$dsn", $dbuser, $dbpass, { Taint =>1 }) || die "$DBI::errstr"; $dbh->{RaiseError} = 1; # activate for programming/debugging only $dbh->{PrintError} = 1; # activate for programming/debugging only $dbh -> {LongReadLen} = 100000; $dbh -> {LongTruncOk} = 0; #$dbh -> {odbc_default_bind_type} = 12; # SQL_VARCHAR my $action=param('ACTION') || ''; if ($action=~/[^\w]/) { die "bad chars in parameter!" } if ($action eq 'SAVE_PROV') {&save_prov;} else {&prov;} sub prov { print "content-type: text/html\n\n"; print "Title\n"; print "


\n\n\n"; print "
\n"; print "\n"; # MsSQL my $sth0 = $dbh->prepare(" SELECT ISNULL(TypeName,'') AS TypeName, ISNULL(ProvLevel1,0.00) AS ProvLevel1, ISNULL(ProvLevel2,0.00) AS ProvLevel2, ISNULL(Action,0) AS Action FROM $dbtable with (NoLock) ORDER BY Action "); my $rv0 = $sth0->execute(); while (my $ref0 = $sth0->fetchrow_hashref()) { if ($ref0->{'Action'}==0) { print "\n"; } else { print "\n"; } } $sth0->finish(); print "
 Text  Level1  Level2  
 $ref0->{'TypeName'}      
 V: $ref0->{'TypeName'}      


"; print "
"; }#/// sub save_prov { my $sth8 = $dbh->prepare("SELECT ISNULL(TypeName,'') AS TypeName FROM $dbtable with (NoLock)"); # MsSQL my $rv8 = $sth8->execute(); while (my $ref8 = $sth8->fetchrow_hashref()) { my $name=$ref8->{'TypeName'}; my $level1=param("LEVEL1_$name") || '0'; $level1=~s/\,/\./i; $level1=~s/[^\d.]//i; my $level2=param("LEVEL2_$name") || '0'; $level2=~s/\,/\./i; $level2=~s/[^\d.]//i; # MsSQL my $sth9 = $dbh->prepare(" UPDATE $dbtable SET ProvLevel1=CONVERT(money,?), ProvLevel2=CONVERT(money,?) WHERE TypeName=? "); my $rv9=$sth9->execute($level1,$level2,$name); #$sth9->bind_param(n,undef,SQL_VARCHAR); # tell DBD-ODBC this is a char #$sth9->finish(); } $sth8->finish(); my $location="./money_test.cgi"; print redirect(-uri=>$location); print "Content-Type: text/html\n"; print "\n"; print "Redirect"; print "If your browser does not support redirection, please click "; print "here"; print ""; }#/// $dbh->disconnect(); DBD-ODBC-1.61/examples/thrtest.pl0000644000175000017500000001531212250310263015751 0ustar martinmartin # $Id$ use 5.008 ; use threads ; use threads::shared ; use strict ; use vars qw{$id $mode $login $userID $authHandler $passwd $authMode $data $dsn} ; our $dsn : shared = $ENV{DBIDSN} || 'dbi:Oracle:host=wingr1;sid=ora81' || 'dbi:ODBC:test' ; our $orashr : shared = '' ; use DBI ; use Carp ; use Carp::Heavy ; sub dotests { my ($doerr, $count) = @_ ; my $dbh = undef ; my $cursor1 = undef ; my $cursor2 = undef ; my $cursor3 = undef ; my $action ; my $tid = threads -> tid() ; my $concnt = 0 ; my $discnt = 0 ; my $half = $count / 2 ; print "start tid = $tid\n" ; #DBI -> trace (3) ; $login = '' ; $authHandler = '' ; while (!defined($count) || $count--) { if (!$dbh) { print "connect #$concnt tid = $tid\n" ; $dbh = DBI -> connect ($dsn, 'scott', 'tiger', {'PrintError' => 1, ora_init_mode => 3, ora_dbh_share => \$orashr}) or die "Cannot connect to $ENV{DBIDSN}" ; $concnt++ ; #print "create from tid = $tid\n" ; #my $t = threads->create('dotests', $doerr, $count) ; #print "created ", $t -> tid, " from tid = $tid\n" ; } my $action = int(rand() * 10) ; print "--> #$tid action = $action count = $count doerr = $doerr\n" ; if ($action == 0 && $doerr ) { # create a syntax error my $sth = $dbh->prepare("SELECT userID, authHandler FROM") ; die "no error" if (!$DBI::errstr) ; } elsif ($action == 1 && !$cursor1) { $cursor1 -> finish if ($cursor1) ; $cursor1 = $dbh->prepare("SELECT userID, authHandler, password FROM thrtest1 WHERE login = ? and locked IS NULL ORDER BY password"); die "db error $DBI::errstr" if (!$doerr && $DBI::errstr) ; } elsif ($action == 2 && !$cursor2) { $cursor2 -> finish if ($cursor2) ; $cursor2 = $dbh->prepare("SELECT authMode, data FROM thrtest2 WHERE handlerID = ?"); die "db error $DBI::errstr" if (!$doerr && $DBI::errstr) ; } elsif ($action == 3 && !$cursor3) { $cursor3 -> finish if ($cursor3) ; $cursor3 = $dbh->prepare("UPDATE thrtest2 SET lastLogin = now() WHERE userID = ?"); die "db error $DBI::errstr" if (!$doerr && $DBI::errstr) ; } elsif ($action == 4 && $cursor1 && $login) { #$cursor1 -> finish if ($cursor1) ; #$cursor1 = $dbh->prepare("SELECT userID, authHandler, password # FROM thrtest1 WHERE login = ? 
and locked IS NULL # ORDER BY password"); # $cursor1->execute($login) ; $cursor1->bind_columns(\($userID, $authHandler, $passwd)); $cursor1->fetch; die "**** user is = $userID, should = $id" if ($id ne $userID) ; die "**** db error $DBI::errstr" if (!$doerr && $DBI::errstr) ; } elsif ($action == 5 && $authHandler && $cursor2) { # $cursor2 -> finish if ($cursor2) ; # $cursor2 = $dbh->prepare("SELECT authMode, data FROM # thrtest2 WHERE handlerID = ?"); $cursor2->execute($authHandler) ; $cursor2->bind_columns(\($authMode, $data)); $cursor2->fetch; die "**** mode is = $authMode, should = $mode for $authHandler (login=$login)" if ($mode ne $authMode) ; die "**** db error $DBI::errstr" if (!$doerr && $DBI::errstr) ; } elsif ($action == 6) { $cursor3 = undef ; } elsif ($action == 7) { $cursor2 = undef ; } elsif ($action == 8) { $cursor1 = undef ; } elsif ($action == 9) { $cursor3 = undef ; $cursor2 = undef ; $cursor1 = undef ; if ($discnt++ % 10 == 0) { $dbh ->disconnect ; die "db error $DBI::errstr" if (!$doerr && $DBI::errstr) ; $dbh = undef ; } my $i = int(rand() * 3) ; $login = ('richter', 'test', 'XX')[$i] ; $id = ('gr', 'tt', 'xx')[$i] ; $mode = ('Windows', 'Windows', '')[$i] ; $authHandler = '' ; print "test login = $login, id = $id, mode = $mode\n" ; if ($count < $half) { threads->create('dotests', $doerr, $count) ; $half = 0 ; } } threads -> yield () ; my @num = threads->list() ; print "#" . scalar(@num) . "\n" ; } threads->create('dotests', $doerr, $count) ; } #------------------------------------------------------------- # # create table thrtest1 & thrtest2 and put some test data in # my $dbh = DBI -> connect ($ENV{DBIDSN}, 'scott', 'tiger') or die "Cannot connect to $ENV{DBIDSN}" ; eval { $dbh -> do ('drop table thrtest1') ; $dbh -> do ('drop table thrtest2') ; } ; my $c = q{ create table thrtest1 (userID varchar(80), authHandler varchar(80), password varchar(80), login varchar(80), lastLogin date, locked int) } ; $dbh -> do ($c) ; my $c = q{ create table thrtest2 (handlerID varchar(80), authMode varchar(80), data varchar(80)) } ; $dbh -> do ($c) ; $dbh -> do ("insert into thrtest1 values ('gr', 'w32', '', 'richter', NULL, NULL)") ; $dbh -> do ("insert into thrtest1 values ('tt', 'w32', '', 'test', NULL, NULL)") ; $dbh -> do ("insert into thrtest1 values ('xx', '', 'xx', 'XX', NULL, NULL)") ; $dbh -> do ("insert into thrtest2 values ('w32', 'Windows', 'mond:mond:ecos')") ; #$dbh -> disconnect ; threads->create('dotests', 1, 20) ; threads->create('dotests', 1, 20) ; threads->create('dotests', 1) ; threads->create('dotests', 1) ; threads->create('dotests', 1) ; threads->create('dotests', 1) ; threads->create('dotests', 1) ; threads->create('dotests', 1) ; threads->create('dotests', 1) ; threads->create('dotests', 1) ; threads->create('dotests', 1) ; threads->create('dotests', 1) ; threads->create('dotests', 1) ; threads->create('dotests', 1) ; threads->create('dotests', 0) ; threads->create('dotests', 0) ; threads->create('dotests', 0) ; threads->create('dotests', 0) ; threads->create('dotests', 0) ; threads->create('dotests', 0) ; threads->create('dotests', 0) ; threads->create('dotests', 0) ; threads->create('dotests', 0, 20) ; threads->create('dotests', 0, 20) ; #-> join; #threads->create('dotests', 0) ; #threads->create('dotests', 0) ; dotests () ; DBD-ODBC-1.61/examples/testclob.pl0000644000175000017500000000156512250310263016100 0ustar martinmartin#!perl -w # $Id$ use DBI; my $dbh = DBI->connect() or die "$DBI::errstr\n"; $dbh->{PrintError} = 0; eval { # if it's not already 
created, the eval will silently ignore this $dbh->do("drop table longtest;"); }; $dbh->{RaiseError} = 1; $dbh->do("create table longtest (id integer primary key, value CLOB)"); my %foo; $foo{2} = "Hello there."; $foo{1} = "This is a test of CLOB. "x200; my $tracefile = "dbitrace.log"; if (-e $tracefile) { unlink($tracefile); } DBI->trace(9,$tracefile); my $sth = $dbh->prepare("insert into longtest values (?, ?)"); $sth->execute((2, $foo{2})); $sth->execute((1, $foo{1})); $dbh->{LongReadLen} = 2000000; my $sth2 = $dbh->prepare("select id, value from longtest order by id"); $sth2->execute; my @row; while (@row = $sth2->fetchrow_array) { print join(', ', @row), "\n"; } $dbh->disconnect; DBD-ODBC-1.61/examples/cancel_big_fetch.pl0000644000175000017500000000233112250310263017470 0ustar martinmartin# $Id$ # demonstrates that not fetching all of a result-set makes can make # a difference. More recent MS SQL Server drivers are better at this so don't # be surprised if this shows no difference between the 2 variations. # However, in the past, cancelling a big select when you have not selected # all rows has made a huge difference as MS SQL Server sees the cancel and # stops sending the result-set. use DBI; use strict; use warnings; use Benchmark; use Data::Dumper; my $h = DBI->connect(); if (@ARGV) { local $h->{PrintError} = 0; eval { $h->do(q/drop table mjebig/); }; $h->do(q/create table mjebig(a varchar(255))/); $h->begin_work; # quicker than autocommit my $val = 'a' x 255; my $s = $h->prepare(q/insert into mjebig values(?)/); foreach (1..100000) { $s->execute($val); } $h->commit; } sub one { my $s = $h->prepare(q/select * from mjebig/); $s->execute; $s->fetch; $s = undef; } sub two { my $s = $h->prepare(q/select * from mjebig/); $s->execute; $s->fetch; my $x = $s->cancel; #print Dumper(\$x), "\n"; $s = undef; } timethese(1000, { 'without_cancel' => sub{one()}, 'with_cancel' => sub{two()} }); DBD-ODBC-1.61/examples/northwind.pl0000644000175000017500000000527212250310263016274 0ustar martinmartin#!perl -w # $Id$ # # Perl script that talks with the Northwinds database using an # ODBC DSN of Northwind. # use DBI qw(:sql_types); use Data::Dumper; use strict; my $dbh = DBI->connect( "dbi:ODBC:Northwind", "", "", {RaiseError => 1, PrintError => 1, AutoCommit => 1} ) or die "Unable to connect: " . $DBI::errstr . "\n"; # OK, connected, now select from Customers table. my $sel = $dbh->prepare( "select * from Customers where CustomerID like ?" ); $sel->execute( qq{A%} ); print "Driver : " . $dbh->{Driver}->{Name} . "\n"; print "SQL Statement: " . $sel->{Statement} . "\n"; print "Table contains: " . $sel->{NUM_OF_FIELDS} . " columns.\n"; print "Column names are: " . join( "\n\t", @{$sel->{NAME}}, "" ); print "Number of Params: " . $sel->{NUM_OF_PARAMS} . "\n"; print "\n"; my @row; { local $^W = 0; print join( "\t", @{$sel->{NAME}}, "\n"); while( @row = $sel->fetchrow_array ) { print join( "\t",@row, "\n"); } } print "\n"; # Remove sample row, if needed. $dbh->do( qq{delete from Customers where CustomerID = 'TAL'} ); # Insert a new customer. #Column names are: CustomerID #CompanyName #ContactName #ContactTitle #Address #City #Region #PostalCode #Country #Phone #Fax print "Inserting new customer: "; $ins = $dbh->prepare( qq{insert into Customers values ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )} ); my @tal = ( "TAL", "STL", "ThomasAL", "STL", "Thomas Lowery", "Manager", "74 Washington Ave.", "Battle Creek", "Northeast", 49017, "USA", "616.961.4000", "616.961.4000" ); print $ins->execute(@tal) . 
"\n"; # Select new customer. print "Select new customer: "; $sel->execute( qq{TAL%} ); print "\n"; { local $^W = 0; print join( "\t", @{$sel->{NAME}}, "\n"); while( @row = $sel->fetchrow_array ) { print join( "\t",@row, "\n"); } } $ins->finish; print "\n"; # Change new customer. print "Update customers: "; $upd = $dbh->prepare( qq{update Customers set CompanyName = 'TAL' where CustomerID = 'TAL'} ); print $upd->execute . "\n"; $sel->execute( qq{TAL%} ); { local $^W = 0; print join( "\t", @{$sel->{NAME}}, "\n"); while( @row = $sel->fetchrow_array ) { print join( "\t",@row, "\n"); } } print "\n"; # Delete new customer. print "Delete customer: " . $dbh->do( qq{ delete from Customers where CustomerID = 'TAL'} ) . "\n"; $sel->execute( qq{TAL%} ); { local $^W = 0; print join( "\t", @{$sel->{NAME}}, "\n"); while( @row = $sel->fetchrow_array ) { print join( "\t",@row, "\n"); } } print "\n"; # Finished $sel->finish; $dbh->disconnect; exit; DBD-ODBC-1.61/examples/timetest.pl0000755000175000017500000000230512250310263016113 0ustar martinmartin#!perl -w # $Id$ use strict; use DBI qw(:sql_types); my $dbh=DBI->connect() or die "Can't connect"; $dbh->{RaiseError} = 1; $dbh->{LongReadLen} = 800; eval { $dbh->do("drop table foo"); }; my @types = (SQL_TYPE_TIMESTAMP, SQL_TIMESTAMP); my $type; my @row; foreach $type (@types) { my $sth = $dbh->func($type, "GetTypeInfo"); if ($sth) { @row = $sth->fetchrow(); $sth->finish(); last if @row; } else { # warn "Unable to get type for type $type\n"; } } die "Unable to find a suitable test type for date field\n" unless @row; my $dbname = $dbh->get_info(17); # sql_dbms_name my $datetype = $row[0]; print "Date type = $datetype\n"; $dbh->do("Create table foo (idcol integer not null primary key, dt $datetype)"); my @tests = ( "{ts '1998-05-13 00:01:00'}", "{ts '1998-05-15 00:01:00.5'}", "{ts '1998-05-15 00:01:00.210'}", ); my $test; my $i = 0; my $sth = $dbh->prepare("insert into foo (idcol, dt) values (?, ?)"); foreach $test (@tests) { $sth->execute($i++, $test); } $sth = $dbh->prepare("Select idcol, dt from foo order by idcol"); $sth->execute; while (@row = $sth->fetchrow_array) { print join(', ', @row), "\n"; } $dbh->disconnect; DBD-ODBC-1.61/examples/testerrhandler.pl0000644000175000017500000000120312250310263017274 0ustar martinmartinuse strict; # $Id$ use DBI; sub err_handler { my ($state, $msg) = @_; # Strip out all of the driver ID stuff $msg =~ s/^(\[[\w\s]*\])+//; print "===> state: $state msg: $msg\n"; return 0; } my $dbh = DBI->connect("dbi:ODBC:PERL_TEST_SQLSERVER", $ENV{DBI_USER}, $ENV{DBI_PASS}) || die "Can't connect: $DBI::errstr\n"; $dbh->{odbc_err_handler} = \&err_handler; $dbh->{odbc_async_exec} = 1; print "odbc_async_exec is: $dbh->{odbc_async_exec}\n"; my $sth; $sth = $dbh->prepare("dbcc checkdb(model)") || die $dbh->errstr; $sth->execute || die $dbh->errstr; $sth->finish; $dbh->disconnect; DBD-ODBC-1.61/examples/multiple_active_stmts.pl0000644000175000017500000000327212250310263020676 0ustar martinmartin# $Id$ # Various ways of enabling Multiple Active Statements support in # MS SQL Server - what you use depends on your driver. # use strict; use DBI; use Data::Dumper; my $attrs = { RaiseError => 1, PrintError => 0, AutoCommit => 1 }; my %connect_args = (DSN => 'dbi:ODBC:DSN=baugi', USER => 'sa', PASS => undef); my $dbhmakers = { normal => sub { DBI->connect ( (map { $connect_args{"$_"} } (qw/DSN USER PASS/) ), $attrs, ); }, MARs => sub { local $connect_args{DSN} = $connect_args{DSN} . 
';MARS_Connection=Yes'; DBI->connect ( (map { $connect_args{$_} } (qw/DSN USER PASS/) ), $attrs, ); }, server_cursors_hack => sub { DBI->connect ( (map { $connect_args{$_} } (qw/DSN USER PASS/) ), { %$attrs, odbc_SQL_ROWSET_SIZE => 2 }, ); }, cursor_type => sub { DBI->connect ( (map { $connect_args{$_} } (qw/DSN USER PASS/) ), { %$attrs, odbc_cursortype => 2 }, ); }, }; for (sort keys %$dbhmakers) { print "\n\nTrying with $_\n"; my $dbh = $dbhmakers->{$_}->(); $dbh->{odbc_SQL_ROWSET_SIZE} = 2; eval { $dbh->do ('DROP TABLE test_foo') }; $dbh->do ('CREATE TABLE test_foo ( bar VARCHAR(20) )'); $dbh->do ("INSERT INTO test_foo (bar) VALUES ( 'baz_$_' )") for (1..5); eval { my @sths; push @sths, $dbh->prepare("SELECT * FROM test_foo") for (1..5); $_->execute for @sths; LOOP: while (1) { for (0 .. $#sths) { my $res = $sths[$_]->fetchrow_arrayref or last LOOP; print "Result from sth $_: $res->[0]\n"; } } }; warn "Died with $@\n" if $@; eval { $dbh->do ('DROP TABLE test_foo') }; } __END__ DBD-ODBC-1.61/examples/backup_restore.pl0000644000175000017500000000104512250310263017262 0ustar martinmartin# $Id$ # backup and restore a MS SQL Server database # needs to loop over odbc_more_results or the procedure does not finish use DBI; use strict; use warnings; use Data::Dumper; sub _error_handler { print Dumper(\@_); 0; } my $h = DBI->connect; $h->{RaiseError} = 1; $h->{HandleError} = \&_error_handler; eval {$h->do('create database foo');}; $h->do(q{backup database foo to disk='c:\foo.bak'}); my $s = $h->prepare(q{restore database foo from disk='c:\foo.bak'}); $s->execute; while ($s->{odbc_more_results}) { print "More\n"; } DBD-ODBC-1.61/examples/joetest7.pl0000755000175000017500000000316112250310263016022 0ustar martinmartin#!perl -w # $Id$ use strict; use DBI qw(:sql_types); my $dbh=DBI->connect() or die "Can't connect"; eval {$dbh->do("DROP TABLE table1");}; eval {$dbh->do("CREATE TABLE table1 (i INTEGER)");}; eval {$dbh->do("DROP PROCEDURE proc1");}; my $proc1 = <do($proc1);}; if (-e "dbitrace.log") { unlink("dbitrace.log"); } $dbh->trace(9,"dbitrace.log"); my $sth = $dbh->prepare ("{call proc1 (?)}"); my $success = -1; $sth->bind_param (1, 99, SQL_INTEGER); $sth->execute(); $success = -1; do { print "Num of fields: $sth->{NUM_OF_FIELDS}\n"; while (my @data = $sth->fetchrow_array()) { ($success) = @data; print "Num of fields: $sth->{NUM_OF_FIELDS}\n" } } while $sth->{odbc_more_results}; print "$success Finished #1\n"; $sth->bind_param (1, 10, SQL_INTEGER); $sth->execute(); $success = -1; do { while (my @data = $sth->fetchrow_array()) {($success) = @data;} } while $sth->{odbc_more_results}; print "$success Finished #2\n"; $sth->bind_param (1, 99, SQL_INTEGER); $sth->execute(); $success = -1; do { while (my @data = $sth->fetchrow_array()) {($success) = @data;} } while $sth->{odbc_more_results}; print "$success Finished #3\n"; $sth->bind_param (1, 99, SQL_INTEGER); $sth->execute(); $success = -1; do { while (my @data = $sth->fetchrow_array()) {($success) = @data;} } while $sth->{odbc_more_results}; print "$success Finished #4\n"; $dbh->disconnect; DBD-ODBC-1.61/examples/testmulti.pl0000644000175000017500000000121612250310263016304 0ustar martinmartinuse DBI; # $Id$ #$ENV{'ODBCINI'}="/export/cmn/etc/odbc.ini" ; my($connectString) = "dbi:ODBC:DSN=TESTDB;Database=xxxxxdata;uid=usrxxxxx;pwd=xxxxx" ; # DBI->trace(9) ; my($dbh)=DBI->connect() ; if ( !defined($dbh) ) { die "Connection failed" ; } my($sqlStr) ; $sqlStr = "select id,name from sysobjects where id=1; select * from sysobjects where id=1; select 
\@\@rowcount"; my($sth) = $dbh->prepare($sqlStr); $sth->execute; if ( $sth->errstr ){ die $sth->errstr ; } my(@aRefResult) = qw() ; my(@data) = qw() ; my($cnt); do { while ( @data = $sth->fetchrow ) { print join("|",@data), "\n" ; } } while ( $sth->{odbc_more_results} ) ; DBD-ODBC-1.61/examples/testxml.pl0000644000175000017500000000361412250310263015756 0ustar martinmartin#perl -w # $Id$ $| = 1; use DBI qw(:sql_types); use Data::Dumper; my $dbh = DBI->connect() || die "Connect failed: $DBI::errstr\n"; my @data = ( [undef, "z" x 13 ], ["2001-01-01 01:01:01.110", "a" x 12], # "aaaaaaaaaaaa" ["2002-02-02 02:02:02.123", "b" x 114], ["2003-03-03 03:03:03.333", "c" x 251], ["2004-04-04 04:04:04.443", "d" x 282], ["2005-05-05 05:05:05.557", "e" x 131] ); eval { local $dbh->{PrintError} = 0; $dbh->do("DROP TABLE PERL_DBD_TABLE1"); }; $dbh->{RaiseError} = 1; $dbh->{LongReadLen} = 8000; my @types = (SQL_TYPE_TIMESTAMP, SQL_TIMESTAMP); my $type; my @row; my $rowcount = 0; foreach $type (@types) { my $sth = $dbh->func($type, "GetTypeInfo"); if ($sth) { @row = $sth->fetchrow(); $sth->finish(); last if @row; } else { # warn "Unable to get type for type $type\n"; } } die "Unable to find a suitable test type for date field\n" unless @row; my $datetype = $row[0]; $dbh->do("CREATE TABLE PERL_DBD_TABLE1 (i INTEGER, time $datetype, str VARCHAR(4000))"); # Insert records into the database: my $sth1 = $dbh->prepare("INSERT INTO PERL_DBD_TABLE1 (i,time,str) values (?,?,?)"); for (my $i=0; $i<@data; $i++) { my ($time,$str) = @{$data[$i]}; print "Inserting: $i, "; print $time if (defined($time)); print " string length " . length($str) . "\n"; $sth1->bind_param (1, $i, SQL_INTEGER); $sth1->bind_param (2, $time, SQL_TIMESTAMP); $sth1->bind_param (3, $str, SQL_LONGVARCHAR); $sth1->execute or die ($DBI::errstr); } # Retrieve records from the database, and see if they match original data: my $sth2 = $dbh->prepare("SELECT i,time,str FROM PERL_DBD_TABLE1 for xml auto"); $sth2->execute or die ($DBI::errstr); my $iErrCount = 0; while (my @row = $sth2->fetchrow_array()) { print join(', ', @row), "\n"; $rowcount++; } print "retrieved $rowcount rows.\n"; $dbh->disconnect; DBD-ODBC-1.61/examples/raiserror.pl0000644000175000017500000000231612250310263016264 0ustar martinmartinuse DBI; use strict; sub handle_error { my ($state, $msg, $native) = @_; print qq{handle_error: \$state is "$state".}, "\n"; print qq{handle_error: \$msg is "$msg".}, "\n"; print qq{handle_error: \$native is "$native".}, "\n"; return 0; } my $dbh = DBI->connect('dbi:ODBC:baugi','sa','easysoft', { #odbc_err_handler => \&handle_error, }); eval { local $dbh->{PrintError} = 0; $dbh->do("drop procedure t_raiserror"); }; $dbh->do(<<'EOT'); CREATE PROCEDURE t_raiserror (@p1 varchar(50), @p2 int output) AS set @p2=45; raiserror ('An error was raised. Input was "%s".', 16, 1, @p1) return 55 EOT sub test() { my $sth = $dbh->prepare("{? 
= call t_raiserror(?,?)}"); my ($p1, $p2) = ('fred', undef); $sth->bind_param_inout(1, \my $retval, 4000); $sth->bind_param(2, $p1); $sth->bind_param_inout(3, \$p2, 32); $sth->execute(); print qq/After execute:\n/; print "err=", $sth->err(), "\n"; print "errstr=", $sth->errstr(), "\n"; print "state=", DBI::neat($sth->state()), "\n"; print qq{\$retval is $retval.}, "\n"; print qq{\$p1 is $p1.}, "\n"; print qq{\$p2 is $p2.}, "\n"; } test(); $dbh->disconnect; DBD-ODBC-1.61/examples/testinout.pl0000755000175000017500000000324212250310263016314 0ustar martinmartin#!/usr/bin/perl -w -I./t # $Id$ # use strict; use DBI qw(:sql_types); my (@row); my $dbh = DBI->connect('dbi:ODBC(RaiseError=>1):PERL_TEST_ORACLE'); #$dbh->{RaiseError} = 1; # ------------------------------------------------------------ # oracle specific $dbh->do("create or replace function testfunc(a in integer, b in integer) return integer is c integer; begin if b is null then c := 0; else c := b; end if; return a * c + 1; end;"); #my $sth = $dbh->prepare("begin ? := testfunc(?, ?); end;"); DBI->trace(9,"c:/trace.txt"); #$dbh->do('CREATE FUNCTION testfunc (@p1 int, @p2 int) RETURNS INT AS BEGIN RETURN (@p1+@p2) END'); my $sth = $dbh->prepare("{ ? = call testfunc(?, ?) }"); my $value = 0; my $b = 30; $sth->bind_param_inout(1, \$value, 50, SQL_INTEGER); $sth->bind_param(2, 10, SQL_INTEGER); $sth->bind_param(3, 30, SQL_INTEGER); $sth->execute; print $value, "\n"; $b = undef; $sth->bind_param_inout(1, \$value, 50, SQL_INTEGER); $sth->bind_param(2, 20, SQL_INTEGER); $sth->bind_param(3, undef, SQL_INTEGER); $sth->execute; print $value, "\n"; $dbh->do("create or replace function testfunc(a in integer, b in out integer) return integer is begin if b is null then b := 0; end if; b := b + 1; return a * b + 1; end;"); $sth = $dbh->prepare("{ ? = call testfunc(?, ?) }"); $value = 0; $b = 30; $sth->bind_param_inout(1, \$value, 50, SQL_INTEGER); $sth->bind_param(2, 10, SQL_INTEGER); $sth->bind_param_inout(3, \$b, 50, SQL_INTEGER); $sth->execute; print $value, ", $b\n"; $b = 10; $sth->bind_param_inout(1, \$value, 50, SQL_INTEGER); $sth->bind_param(2, 20, SQL_INTEGER); $sth->bind_param_inout(3, \$b, 50, SQL_INTEGER); $sth->execute; print $value, ", $b\n"; $dbh->disconnect(); DBD-ODBC-1.61/examples/listtabs.pl0000755000175000017500000000153012250310263016101 0ustar martinmartin#!/usr/bin/perl -I./t # $Id$ require DBI; my (@row); my $dbh = DBI->connect() || die "Can't connect to your $ENV{DBI_DSN} using user: $ENV{DBI_USER} and pass: $ENV{DBI_PASS}\n$DBI::errstr\n"; # ------------------------------------------------------------ my $rows = 0; my @tables; my $table; $| = 1; if (@tables = $dbh->tables) { print join(', ', @tables), "\n"; foreach $table (@tables) { my $schema = ''; if ($table =~ m/(.*)\.(.*)$/) { $schema = $1; $table = $2; } my $sthcols = $dbh->func('',$schema, $table,'', columns); if ($sthcols) { while (@row = $sthcols->fetchrow_array) { print "\t", join(', ', @row), "\n"; } } else { # hmmm...none of my drivers support this...dang. I can't test it. print "SQLColumns: $DBI::errstr\n"; } } } $dbh->disconnect(); DBD-ODBC-1.61/examples/moreresults.pl0000755000175000017500000000137412250310263016646 0ustar martinmartin#!perl -w # $Id$ # # Sorry -- this test is pretty specific to MSSQL Server and Sybase... 
# use DBI; my (@row); my $dbh; $dbh = DBI->connect() || die "Can't connect to your $ENV{DBI_DSN} using user: $ENV{DBI_USER} and pass: $ENV{DBI_PASS}\n$DBI::errstr\n"; # ------------------------------------------------------------ my $result_sets = 0; $| = 1; my $sth; $sth = $dbh->prepare("{call sp_spaceused}") or die $dbh->errstr; $sth->execute or die $sth->errstr; do { print join(":", @{$sth->{NAME}}), "\n"; while ( my $ref = $sth->fetch ) { print join(":", @$ref), "\n"; } } while ($sth->{odbc_more_results}); print "(", $sth->rows, " rows affected)\n"; $sth->finish; $dbh->disconnect(); DBD-ODBC-1.61/examples/unicode_params.pl0000644000175000017500000000206712250310263017250 0ustar martinmartin# $Id$ # Quick demo of inserting and retrieving unicode strings # NOTE: your DBD::ODBC really needs to be built with unicode # and this script will warn if not. You can comment the die out and it # will work with some drivers without being built for unicode but you'll # get slightly different output: # # with unicode: # $VAR1 = [ # [ # "\x{20ac}" # note, is a unicode Perl string # ] # ]; # is utf8 1 # # without unicode: # # $VAR1 = [ # [ # '€' # note, not a unicode Perl string # ] # ]; # is utf8 # use DBI; use strict; use Data::Dumper; use utf8; my $h = DBI->connect(); warn "Warning DBD::ODBC not built for unicode - you probably don't want to do this" if !$h->{'odbc_has_unicode'}; eval { $h->do(q/drop table mje/); }; $h->do(q/create table mje (a nvarchar(20))/); $h->do(q/insert into mje values(?)/, undef, "\x{20ac}"); my $s = $h->prepare(q/select * from mje/); $s->execute; my $f = $s->fetchall_arrayref; print Dumper($f), "\n"; print "is utf8 ", utf8::is_utf8($f->[0][0]), "\n"; DBD-ODBC-1.61/examples/params_in_error.pl0000644000175000017500000000367512250310263017447 0ustar martinmartin# $Id$ # # Code to demonstrate new (experimental) odbc_getdiag* and how you can find # the bound parameter/column in error # use DBI qw(:sql_types); use strict; use warnings; use Data::Dumper; use DBD::ODBC qw(:diags); my $h = DBI->connect('dbi:ODBC:baugi','sa','easysoft', {RaiseError => 1, PrintError => 0}); eval { local $h->{PrintError} = 0; $h->do(q/drop table test/); $h->do(q/drop table test2/); }; $h->do(q/create table test (a int, b int)/); $h->do(q/create table test2 (a varchar(20), b varchar(20))/); my $s = $h->prepare(q/insert into test values(?,?)/); $s->bind_param(1, 'fred'); $s->bind_param(2, 1); eval { $s->execute; }; if ($@) { # NOTE from 1.34_3 calling odbc_getdiag* would clear DBI's errors # and so if you wanted them you'd have to call DBI's error methods first. # From 1.34_4 calling odbc_getdiag* will not clear DBI's errors. my @diags = $s->odbc_getdiagrec(1); my $dbierr = $s->errstr; print <<"EOT"; DBI error is $dbierr which was created from the ODBC diagnostics: $diags[0] $diags[1] $diags[2] EOT my $p = $s->odbc_getdiagfield(1, SQL_DIAG_COLUMN_NUMBER); print "The parameter in error is $p\n"; } $h->do(q/insert into test2 values(?,?)/, undef, 1, 'fred'); $s = $h->prepare(q/select a,b from test2/); $s->execute; my ($a, $b); $s->bind_col(1, \$a, {TYPE => SQL_INTEGER}); $s->bind_col(2, \$b, {TYPE => SQL_INTEGER}); eval { $s->fetch; }; if ($@) { # NOTE from 1.34_3 calling odbc_getdiag* would clear DBI's errors # and so if you wanted them you'd have to call DBI's error methods first. # From 1.34_4 calling odbc_getdiag* will not clear DBI's errors. 
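# As an extra sanity check you can compare what comes back with what was
# sent, for example (using $f as fetched below):
#   printf "sent U+%04X, got U+%04X\n", 0x20AC, ord($f->[0][0]);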
my @diags = $s->odbc_getdiagrec(1); my $dbierr = $s->errstr; print <<"EOT"; DBI error is $dbierr which was created from the ODBC diagnostics: $diags[0] $diags[1] $diags[2] EOT my $p = $s->odbc_getdiagfield(1, SQL_DIAG_COLUMN_NUMBER); print "The column in error is $p\n"; } DBD-ODBC-1.61/examples/execute_for_fetch.pl0000644000175000017500000000625112250310263017737 0ustar martinmartin# $Id$ # # Demonstrate DBD::ODBC's execute_for_fetch. # By default DBD::ODBC has its own execute_for_fetch which should always # be quicker than DBI's implementation which just does loads of execute calls # one for each insert whereas DBD::ODBC's defaults to 10 inserts at a time. # Also shows how changing DBD::ODBC's odbc_batch_size can influence the # speed but at the expense of memory. # # You can run with no args to run the Benchmark or you can provide an # command line arg of 'dbi', 'eff' or 'eff50', efftxn and eff50txn # to run an individual method. # The tests with txn in the name start a transaction at the start and commit # at the end which is always faster. use DBI; use Data::Dumper; use strict; use warnings; use Benchmark; my $fetch_row = 0; my $x = '11111111112' x 1000; my @p = split (/2/,$x); print "Total rows to insert = ", scalar(@p), "\n"; if (@ARGV) { if ($ARGV[0] eq 'dbi') { two(); } elsif ($ARGV[0] eq 'dbitxn') { two(1); } elsif ($ARGV[0] eq 'eff') { one(); } elsif ($ARGV[0] eq 'eff50') { one(1); } elsif ($ARGV[0] eq 'efftxn') { one(undef, 1); } elsif ($ARGV[0] eq 'eff50txn') { one(1,1); } } else { timethese(20, { 'execute_for_fetch_default' => sub {one()}, 'dbi' => sub {two()}, 'dbitxn' => sub {two(1)}, 'execute_for_fetch_batch_size' => sub {one(1)}, 'execute_for_fetch_txn', => sub {one(undef, 1)}, 'execute_for_fetch_batch_size_txn' => sub {one(1,1)} }); } # any arg true enables odbc array operations sub dbconnect { my $enable = shift; my $h = DBI->connect("dbi:ODBC:DSN=baugi","sa","easysoft", {RaiseError => 1, PrintError => 0, odbc_array_operations => $enable, }); eval { local $h->{PrintError} = 0; $h->do(q/drop table two/); }; $h->do(q/create table two (a varchar(20))/); return $h; } # any true first arg sets odbc_batch_size to 50 (5 * the default) # any true second arg starts a transaction and commits it at the end sub one { my $h = dbconnect(1); $h->{odbc_batch_size} = 50 if $_[0]; $h->begin_work if $_[1]; doit($h); $h->commit if $_[1]; $h->disconnect; } sub two { my $h = dbconnect(0); $h->begin_work if $_[0]; doit($h); $h->commit if $_[0]; $h->disconnect; } sub doit { my $h = shift; #print "dbh odbc_batch_size=", $h->{odbc_batch_size}, "\n"; my $s = $h->prepare(q/insert into two values(?)/); #print "sth odbc_batch_size=", $s->{odbc_batch_size}, "\n"; my ($tuples, $rows, @tuple_status); #print "About to run execute_for_fetch\n"; eval { ($tuples, $rows) = $s->execute_for_fetch(\&fetch_sub, \@tuple_status); }; if ($@) { print "execute_for_fetch died : $@ END\n"; } #print "tuples = ", Dumper($tuples), "rows = ", Dumper($rows), "\n"; #print "tuple status ", Dumper(\@tuple_status), "\n"; $s = undef; #my $r = $h->selectall_arrayref(q/select * from two/); #print "Rows:", scalar(@$r), "\n"; #print Dumper($r); #$h->do(q/delete from two/); } sub fetch_sub { #print "fetch_sub $fetch_row\n"; if ($fetch_row == @p) { #print "returning undef\n"; $fetch_row = 0; return; } return [$p[$fetch_row++]]; } DBD-ODBC-1.61/examples/enable_odbc_tracing.pl0000644000175000017500000000116012250310263020174 0ustar martinmartin# $Id$ # Shows how to enable ODBC API tracing for this Perl script. 
# NOTE: the ODBC Driver manager does the actual tracing use strict; use warnings; use DBI; my $h = DBI->connect($ENV{DBI_DSN}, $ENV{DBI_USER}, $ENV{DBI_PASS}, {odbc_trace_file => 'c:\users\martin\odbc.trc', odbc_trace => 1}); print "trace is ", $h->{odbc_trace}, ", ", $h->{odbc_trace_file}, "\n"; my $s = $h->prepare('select 1'); $s->execute; $h->{odbc_trace} = 0; print "trace is ", $h->{odbc_trace}, "\n"; $s->fetch; $s->fetch; $h->{odbc_trace} = 1; print "trace is ", $h->{odbc_trace}, "\n"; $h->disconnect; DBD-ODBC-1.61/examples/proctest3.pl0000755000175000017500000000175612250310263016214 0ustar martinmartin#!perl -w # $Id$ use DBI; use strict; my $dbh = DBI->connect(); $dbh->{LongReadLen} = 8000; eval { local $dbh->{PrintError} = 0; $dbh->do("drop procedure PERL_DBD_TESTPRC"); }; $dbh->do("CREATE PROCEDURE PERL_DBD_TESTPRC \@parameter1 int = 0 AS if (\@parameter1 >= 0) select * from systypes RETURN(\@parameter1) "); sub test() { my $sth = $dbh->prepare("{call PERL_DBD_TESTPRC(?)}"); $sth->bind_param(1, -1, { TYPE => 4 }); $sth->execute(); print '$sth->{NUM_OF_FIELDS}: ', $sth->{NUM_OF_FIELDS}, " expected: 0\n"; if($sth->{NUM_OF_FIELDS}) { my @row; while (@row = $sth->fetchrow_array()) { print join(', ', @row), "\n"; } } } ########################################## ### Test ########################################## unlink("dbitrace.log") if (-e "dbitrace.log"); $dbh->trace(9, "dbitrace.log"); test(); ########################################## ### Cleanup... ########################################## $dbh->disconnect; DBD-ODBC-1.61/examples/testfunc.pl0000755000175000017500000000620112250310263016107 0ustar martinmartin#!/usr/bin/perl -w -I./t # $Id$ # use strict; use DBI qw(:sql_types); # use DBD::ODBC::Const qw(:sql_types); my (@row); my $dbh = DBI->connect() or exit(0); $dbh->{RaiseError} = 1; # ------------------------------------------------------------ # dumb, for now... 
# SQL_DRIVER_VER returns string # SQL_CURSOR_COMMIT_BEHAVIOR returns 16 bit value # SQL_ALTER_TABLE returns 32 bit value # SQL_ACCESSIBLE_PROCEDURES returns short string (Y or N) my %InfoTests = ( 'SQL_DRIVER_NAME', 6, 'SQL_DRIVER_VER', 7, 'SQL_DRIVER_ODBC_VER', 77, 'SQL_DATABASE_NAME', 16, 'SQL_DBMS_NAME', 17, 'SQL_DBMS_VER', 18, 'SQL_IDENTIFIER_QUOTE_CHAR', 29, 'SQL_DM_VER', 171, 'SQL_CATALOG_NAME_SEPARATOR', 41, 'SQL_CATALOG_LOCATION', 114, 'SQL_CURSOR_COMMIT_BEHAVIOR', 23, 'SQL_ALTER_TABLE', 86, 'SQL_ACCESSIBLE_PROCEDURES', 20, 'SQL_PROCEDURES', 21, 'SQL_MULT_RESULT_SETS', 36, 'SQL_PROCEDURE_TERM', 40, ); my %TypeTests = ( 'SQL_ALL_TYPES' => 0, 'SQL_VARCHAR' => SQL_VARCHAR, 'SQL_CHAR' => SQL_CHAR, 'SQL_INTEGER' => SQL_INTEGER, 'SQL_SMALLINT' => SQL_SMALLINT, 'SQL_NUMERIC' => SQL_NUMERIC, 'SQL_LONGVARCHAR' => SQL_LONGVARCHAR, 'SQL_LONGVARBINARY' => SQL_LONGVARBINARY, 'SQL_WVARCHAR' => SQL_WVARCHAR, 'SQL_WCHAR' => SQL_WCHAR, 'SQL_WLONGVARCHAR' => SQL_WLONGVARCHAR, ); my $ret; print "\nInformation for DBI_DSN=$ENV{'DBI_DSN'}\n\n"; my $SQLInfo; foreach $SQLInfo (sort keys %InfoTests) { $ret = 0; $ret = $dbh->get_info($InfoTests{$SQLInfo}); print "$SQLInfo ($InfoTests{$SQLInfo}):\t$ret\n"; } print "\nGetfunctions : ", join(",", $dbh->func(0, GetFunctions)), "\n\n"; print "\nGetfunctions v3: ", join(",", $dbh->func(999, GetFunctions)), "\n\n"; foreach $SQLInfo (sort keys %TypeTests) { print "Listing all $SQLInfo types\n"; $sth = $dbh->func($TypeTests{$SQLInfo}, GetTypeInfo); if ($sth) { my $colcount = $sth->func(1, 0, ColAttributes); # 1 for col (unused) 0 for SQL_COLUMN_COUNT # print "Column count is $colcount\n"; my $i; my @coldescs = (); # column 0 should be an error/blank for ($i = 0; $i <= $colcount; $i++) { # $i is colno (1 based) 2 is for SQL_COLUMN_TYPE # $i == 0 is intentional error...tests error handling. my $stype = $sth->func($i, 2, ColAttributes); my $sname = $sth->func($i, 1, ColAttributes); # print "Col Attributes (TYPE): ", &nullif($stype), "\n"; # print "Col Attributes (NAME): ", &nullif($sname), "\n"; push(@coldescs, $sname); # print "Desc Col: ", join(', ', &nullif($sth->func($i, DescribeCol))), "\n"; } # print join(', ', @coldescs), "\n"; while (@row = $sth->fetchrow()) { print "\t$row[0]\n", # &nullif($row[1]), ", " , #&nullif($row[2]), ", " , #&nullif($row[3]), ", " , #&nullif($row[4]), ", " , #&nullif($row[5]), "\n"; # print join(', ', @row), "\n"; } $sth->finish(); } else { # no info on that type... print "no info for this type\n"; } } my $SQL_XOPEN_CLI_YEAR = 10000; print "\nSQL_XOPEN_CLI_YEAR = ", $dbh->get_info($SQL_XOPEN_CLI_YEAR), "\n"; $dbh->disconnect(); sub nullif ($) { my $val = shift; $val ? $val : "(null)"; } DBD-ODBC-1.61/examples/testigparams.pl0000755000175000017500000000324212250310263016761 0ustar martinmartin#!/usr/bin/perl -w -I./t # $Id$ # use strict; use DBI qw(:sql_types); my (@row); my $dbh = DBI->connect('dbi:ODBC(RaiseError=>1):PERL_TEST_ORACLE'); #$dbh->{RaiseError} = 1; # ------------------------------------------------------------ # oracle specific $dbh->do("create or replace function testfunc(a in integer, b in integer) return integer is c integer; begin if b is null then c := 0; else c := b; end if; return a * c + 1; end;"); #my $sth = $dbh->prepare("begin ? := testfunc(?, ?); end;"); DBI->trace(9,"c:/trace.txt"); #$dbh->do('CREATE FUNCTION testfunc (@p1 int, @p2 int) RETURNS INT AS BEGIN RETURN (@p1+@p2) END'); my $sth = $dbh->prepare("{ ? = call testfunc(?, ?) 
}"); my $value = 0; my $b = 30; $sth->bind_param_inout(1, \$value, 50, SQL_INTEGER); $sth->bind_param(2, 10, SQL_INTEGER); $sth->bind_param(3, 30, SQL_INTEGER); $sth->execute; print $value, "\n"; $b = undef; $sth->bind_param_inout(1, \$value, 50, SQL_INTEGER); $sth->bind_param(2, 20, SQL_INTEGER); $sth->bind_param(3, undef, SQL_INTEGER); $sth->execute; print $value, "\n"; $dbh->do("create or replace function testfunc(a in integer, b in out integer) return integer is begin if b is null then b := 0; end if; b := b + 1; return a * b + 1; end;"); $sth = $dbh->prepare("{ ? = call testfunc(?, ?) }"); $value = 0; $b = 30; $sth->bind_param_inout(1, \$value, 50, SQL_INTEGER); $sth->bind_param(2, 10, SQL_INTEGER); $sth->bind_param_inout(3, \$b, 50, SQL_INTEGER); $sth->execute; print $value, ", $b\n"; $b = 10; $sth->bind_param_inout(1, \$value, 50, SQL_INTEGER); $sth->bind_param(2, 20, SQL_INTEGER); $sth->bind_param_inout(3, \$b, 50, SQL_INTEGER); $sth->execute; print $value, ", $b\n"; $dbh->disconnect(); DBD-ODBC-1.61/examples/testundef3.pl0000755000175000017500000000122612250310263016342 0ustar martinmartin#!perl -w # $Id$ use strict; use DBI qw(:sql_types); my $dbh=DBI->connect() or die "Can't connect"; eval {$dbh->do("DROP TABLE table1");}; eval {$dbh->do("CREATE TABLE table1 (str VARCHAR(10))");}; unlink("dbitrace.log") if (-e "dbitrace.log") ; $dbh->trace(8, "dbitrace.log"); my $sth = $dbh->prepare ("INSERT INTO table1 (str) VALUES (?)"); $sth->bind_param (1, undef, SQL_VARCHAR); $sth->execute(); $sth->bind_param (1, "abcde", SQL_VARCHAR); $sth->execute(); my $sth2 = $dbh->prepare("select * from table1"); $sth2->execute; my @row; my $i = 0; while (@row = $sth2->fetchrow_array) { $i++; print "$i: ", join(', ', @row), "\n"; } $dbh->disconnect; DBD-ODBC-1.61/examples/testproc2.pl0000755000175000017500000000223612250310263016205 0ustar martinmartin#!perl -w # $Id$ use strict; use DBI; # Connect to the database, and create two tables and a stored procedure: my $dbh=DBI->connect() or die "Can't connect"; eval {$dbh->do("DROP TABLE table1");}; eval {$dbh->do("CREATE TABLE table1 (i INTEGER)");}; eval {$dbh->do("DROP TABLE table2");}; eval {$dbh->do("CREATE TABLE table2 (i INTEGER)");}; eval {$dbh->do("DROP PROCEDURE proc1");}; eval {$dbh->do("CREATE PROCEDURE proc1 AS ". 
"BEGIN INSERT INTO table1 VALUES (1); END;");}; unlink "dbitrace.log" if (-e "dbitrace.log"); $dbh->trace(9, "dbitrace.log"); # Insert a row into table1, either directly or indirectly: my $direct = 0; my $sth1; if ($direct) { $sth1 = $dbh->prepare ("INSERT INTO table1 VALUES (1)"); } else { $sth1 = $dbh->prepare ("{ call proc1 }"); } $sth1->execute(); $sth1->execute(); $sth1->execute(); # Insert a row into table2 (this fails after an indirect insertion): my $sth2 = $dbh->prepare ("INSERT INTO table2 VALUES (2)"); $sth2->execute(); my $sth = $dbh->prepare("select i from table1 union select i from table2"); my @row; $sth->execute; while (@row = $sth->fetchrow_array) { print $row[0], "\n"; } $dbh->disconnect; DBD-ODBC-1.61/examples/proctest2.pl0000755000175000017500000000451412250310263016206 0ustar martinmartin#!perl -w # $Id$ use DBI; use strict; use Data::Dumper; unlink("dbitrace.log") if (-e "dbitrace.log"); DBI->trace(9, "dbitrace.log"); my $dbh = DBI->connect(); $dbh->{LongReadLen} = 8000; $dbh->{FetchHashKeyName} = 'NAME_uc'; my $dbh2 = DBI->connect(); $dbh2->{LongReadLen} = 8000; $dbh2->{FetchHashKeyName} = 'NAME_uc'; eval { local $dbh->{PrintError} = 0; $dbh->do("drop procedure PERL_DBD_TESTPRC"); }; $dbh->do("CREATE PROCEDURE PERL_DBD_TESTPRC \@parameter1 int = 22 AS /* SET NOCOUNT ON */ select 1 as some_data select isnull(\@parameter1, 0) as parameter1, 3 as some_more_data -- print 'kaboom' RETURN(\@parameter1 + 1)"); my $innerTestSth; sub innerTest($) { my ($outputTempate) = @_; my %outputData; my $queryInputParameter1 = 2222; my $queryOutputParameter = $outputTempate; if(!defined $innerTestSth) { $innerTestSth = $dbh2->prepare('{? = call PERL_DBD_TESTPRC(?) }'); } $innerTestSth->bind_param_inout(1, \$queryOutputParameter, 30, { TYPE => DBI::SQL_INTEGER }); $innerTestSth->bind_param(2, $queryInputParameter1, { TYPE => DBI::SQL_INTEGER }); # $sth->trace(1);#, 'DbiTest.txt'); $innerTestSth->execute(); print '$innerTestSth->{Active}: ', $innerTestSth->{Active}, "\n"; do { my $rowRef; undef $rowRef; print "Columns: ", join(', ', @{$innerTestSth->{NAME}}), "\n"; for(;$rowRef = $innerTestSth->fetchrow_hashref(); ) { print '%$rowRef2 ', Dumper(\%$rowRef), "\n"; } } while($innerTestSth->{odbc_more_results}); print '$queryOutputParameter: \'', $queryOutputParameter, '\' expected: (', $queryInputParameter1 + 1, ")\n\n"; } sub test($) { my ($outputTempate) = @_; my $queryInputParameter1 = 2222; my $queryOutputParameter = $outputTempate; my $sth = $dbh->prepare('select ID from (select 1 as ID union select 2 as ID union select 3 as ID) tmp order by ID'); $sth->execute(); do { for(my $rowRef = undef; $rowRef = $sth->fetchrow_hashref('NAME'); ) { print '%$rowRef ', Dumper(\%$rowRef), "\n"; innerTest($outputTempate); } } while($sth->{odbc_more_results}); } ########################################## ### Test ########################################## test(10); ########################################## ### Cleanup... 
########################################## $dbh2->disconnect; $dbh->disconnect; DBD-ODBC-1.61/examples/column_info.pl0000755000175000017500000000131412250310263016564 0ustar martinmartin#!perl -w # $Id$ use strict; use DBI; my $dbh = DBI->connect() or die "Can't connect"; $dbh->{RaiseError} = 1; $dbh->{LongReadLen} = 800; my @tables = $dbh->tables; my @mtable = grep(/foo/, @tables); my ($catalog, $schema, $table) = split(/\./, $mtable[0]); $catalog =~ s/"//g; $schema =~ s/"//g; $table =~ s/"//g; print "Getting column info for: $catalog, $schema, $table\n"; my $sth = $dbh->column_info($catalog, $schema, $table, undef); my @row; print join(', ', @{$sth->{NAME}}), "\n"; while (@row = $sth->fetchrow_array) { # join prints nasty warning messages with -w. There's gotta be a better way... foreach (@row) { $_ = "" if (!defined); } print join(", ", @row), "\n"; } $dbh->disconnect; DBD-ODBC-1.61/examples/testdatasources.pl0000755000175000017500000000015212250310263017470 0ustar martinmartinuse DBI; # $Id$ print join(', ', DBI->data_sources("ODBC")), "\n"; print $DBI::errstr; print "\n"; DBD-ODBC-1.61/examples/dml_counts.pl0000644000175000017500000000465412250310263016432 0ustar martinmartin# $Id$ # # Multiple ways of getting DML counts # Provided for: # http://stackoverflow.com/questions/4202178/perl-dbi-getting-records-affected-of-each-statement-in-a-transaction use DBI; use strict; use warnings; use Data::Dumper; my $h = DBI->connect(); $h->{RaiseError} = 1; eval {$h->do(q/drop table mje/)}; $h->do(q/create table mje (a int)/); sub example1 { my $s = $h->prepare(<<'EOT'); declare @insert_count int declare @update_count int declare @delete_count int begin tran insert into mje values(1); select @insert_count = @@rowcount update mje set a = 2 where a = 1; select @update_count = @@rowcount delete from mje where a = 2; select @delete_count = @@rowcount commit tran select @insert_count, @update_count, @delete_count EOT print "execute: ", $s->execute, "\n"; return $s; } sub example2 { my $s = $h->prepare(<<'EOT'); begin tran insert into mje values(1); select @@rowcount update mje set a = 2 where a = 1; select @@rowcount delete from mje where a = 2; select @@rowcount commit tran EOT print "execute: ", $s->execute, "\n"; return $s; } sub example3 { eval {$h->do(q/drop procedure pmje/)}; $h->do(<<'EOT'); create procedure pmje (@insert int OUTPUT, @update int OUTPUT, @delete int OUTPUT) AS begin tran insert into mje values(1); select @insert = @@rowcount update mje set a = 2 where a = 1; select @update = @@rowcount delete from mje where a = 2; select @delete = @@rowcount commit tran EOT my $s = $h->prepare(q/{call pmje(?,?,?)}/); $s->bind_param_inout(1, \my $insert, 100); $s->bind_param_inout(2, \my $update, 100); $s->bind_param_inout(3, \my $delete, 100); $s->execute; print "example3 insert=$insert, update=$update, delete=$delete\n"; } sub example4 { my ($inserted, $updated, $deleted); eval { $h->begin_work; $inserted = $h->do(q/insert into mje values(1)/); $updated = $h->do(q/update mje set a = 2 where a = 1/); $deleted = $h->do(q/delete from mje where a = 2/); $h->commit; }; if ($@) { $h->rollback or warn "Failed to rollback"; } print "example4 insert=$inserted, update=$updated, delete=$deleted\n"; } sub show_result { my $s = shift; do { while (my @row = $s->fetchrow_array) { print Dumper(\@row), "\n"; } } while ($s->{odbc_more_results}); } my $s = example1(); show_result($s); $s = example2(); show_result($s); example3(); example4(); 
DBD-ODBC-1.61/unicode_helper.h0000644000175000017500000000067213614770327015262 0ustar martinmartin#ifdef WITH_UNICODE #ifndef unicode_helper_h #define unicode_helper_h #include "ConvertUTF.h" UTF16 * WValloc(char * s); void WVfree(UTF16 * wp); void sv_setwvn(pTHX_ SV * sv, UTF16 * wp, STRLEN len); SV *sv_newwvn(pTHX_ UTF16 * wp, STRLEN len); char * PVallocW(UTF16 * wp); void PVfreeW(char * s); void SV_toWCHAR(pTHX_ SV * sv); void utf8sv_to_wcharsv(pTHX_ SV *sv); #endif /* defined unicode_helper_h */ #endif /* WITH_UNICODE */ DBD-ODBC-1.61/ODBC.h0000644000175000017500000000444213614567361013005 0ustar martinmartin/* * $Id$ * Copyright (c) 1994,1995,1996,1997 Tim Bunce * * You may distribute under the terms of either the GNU General Public * License or the Artistic License, as specified in the Perl README file. */ #include "dbdodbc.h" /* Get SQL_* defs *before* loading DBIXS.h */ #define NEED_DBIXS_VERSION 93 #define PERL_NO_GET_CONTEXT #include /* from DBI. Load this after dbdodbc.h */ #include "dbivport.h" /* copied from DBI to maintain compatibility */ #include "dbdimp.h" #include /* from DBI. Load this after dbdodbc.h */ SV *odbc_get_info _((SV *dbh, int ftype)); int odbc_get_type_info _((SV *dbh, SV *sth, int ftype)); SV *odbc_col_attributes _((SV *sth, int colno, int desctype)); SV *odbc_cancel _((SV *sth)); int odbc_describe_col _((SV *sth, int colno, char *ColumnName, I16 BufferLength, I16 *NameLength, I16 *DataType, U32 *ColumnSize, I16 *DecimalDigits, I16 *Nullable)); int odbc_db_columns _((SV *dbh, SV *sth, SV *catalog, SV *schema, SV *table, SV *column)); int odbc_st_tables _((SV *dbh, SV *sth, SV *catalog, SV *schema, SV *table, SV *table_type)); int odbc_st_primary_keys _((SV *dbh, SV *sth, char *catalog, char *schema, char *table)); int odbc_get_statistics _((SV *dbh, SV *sth, char *CatalogName, char *SchemaName, char *TableName, int Unique)); int odbc_get_special_columns _((SV *dbh, SV *sth, int Identifier, char *CatalogName, char *SchemaName, char *TableName, int Scope, int Nullable)); int odbc_get_foreign_keys _((SV *dbh, SV *sth, char *PK_CatalogName, char *PK_SchemaName, char *PK_TableName, char *FK_CatalogName, char *FK_SchemaName, char *FK_TableName)); void dbd_error _((SV *h, RETCODE err_rc, char *what)); void dbd_error2 _((SV *h, RETCODE err_rc, char *what, HENV henv, HDBC hdbc, HSTMT hstmt)); SQLLEN dbd_db_execdirect _(( SV *dbh, SV *statement )); IV odbc_st_lob_read(SV *sth, int colno, SV *data, UV length, IV type); IV odbc_st_execute_for_fetch(SV *sth, SV *tuples, IV count, SV *tuple_status); IV odbc_st_rowcount(SV *sth); int dbd_st_statistics(SV *dbh, SV *sth, char *catalog, char *schema, char *table, int unique, int quick); int dbd_st_bind_col(SV *sth, imp_sth_t *imp_sth, SV *col, SV *ref, IV type, SV *attribs); AV* dbd_data_sources(SV *drh ); /* end of ODBC.h */ DBD-ODBC-1.61/README.af0000644000175000017500000001660212254016131013352 0ustar martinmartin=for humans This file is written in the POD format, use pod2html or similar to view it. =head1 NOTE This file is now really historical. Unicode support in DBD::ODBC has matured greatly since Alexander's initial patch although I greatly appreciate it. =head1 Unicode patch for DBD::ODBC 1.13 Date: 2006-03-04 =head2 Goals =over 4 =item * Allow an application on Win32 using DBD::ODBC to store and fetch Unicode strings in a MS SQL Server 2000 database. =item * Minimal changes to the existing driver code. =item * Don't break working code. =item * Make the DBD::ODBC "do the right thing" with the UTF8 flag. 
=item * A first step towards proper Unicode support for DBD:ODBC. =back =head2 Legal disclaimer The Unicode patch for DBD::ODBC was initially written by Alexander Foken (alexander at foken dot de). I wrote this patch during my working hours for a project that needs Unicode support in DBD::ODBC on Windows, and I have the permission of my former employer to publish this patch. You may distribute under the terms of either the GNU General Public License or the Artistic License, as specified in the Perl README file. =head2 Applying the Patch =over 4 =item 1. get DBD::ODBC 1.13 from CPAN =item 2. unpack it =item 3. C DBD-ODBC-1.13-Unicode.patch> =item 4. build DBD::ODBC as usual (C, C, C, C) =back =head2 Tested Databases =over 4 =item Microsoft SQL Server 2000 SP3 Works, using the ODBC driver from the MS SQL 2000 CDROM. You need to install SP1, SP2, and SP3 on both client and server, because DBD::ODBC needs a recent set of MDAC libraries. =item Oracle 9.2 Works, using the ODBC driver from the Oracle 9.2 CDROM. You have to set the environment variables C and C (or any other language setting ending with "C<.AL32UTF8>") before loading the patched DBD::ODBC to make Oracle return Unicode data. (See also "Oracle and Unicode" in the POD of DBD::Oracle.) And you have to enable the "C" Workaround in the Oracle ODBC Driver Configuration to make Oracle return Unicode to a non-Unicode application. Alternatively, you can include "C" in your connect string. Better try to use DBD::Oracle to access Oracle with less overhead and better support for Oracle-specific stuff. =item Oracle 8.x Not tested but known not to support Unicode very well. Quoting the DBD::Oracle man page: "I [...] I" =item MS Access 2000 Unicode tests fail because MS Access 2000 seems not to give any hints about Unicode, so all data is treated as non-Unicode. You do not want to use this combination. You may want to try the MSDE, it has the SQL Server engine, but with a lower connection limit and without GUI tools. There are several 3rd party GUIs for the MSDE. =item PostgreSQL 8.0.3 Some tests from the original DBD::ODBC 1.13 fail with PostgreSQL 8.0.3, so you may not want to use DBD::ODBC to connect to PostgreSQL 8.0.3. Unicode tests fail because PostgreSQL seems not to give any hints about Unicode, so all data is treated as non-Unicode. Better try to use DBD::Pg to access PostgreSQL with less overhead and better support for PostgreSQL-specific stuff. DBD::Pg has a driver attribute named C, set it to 1 and you have proper Unicode support. =back =head2 Tested Operating Systems and ODBC Managers =over 4 =item MS Windows 2000 Professional and Server, using the standard ODBC Manager from Microsoft. =back (Yes, this list should be longer.) =head2 Known Problems Perl 5.8.1 or newer is required. Older Perl before 5.8.0 lacked proper Unicode support. Perl 5.8.0 lacks some auxillary functions for Unicode. Unicode is supported only for SQL statement parameters and data returned by the fetch methods, SQL statements are still treated as native encoding. If you need a unicode constant in an SQL statement, you have to pass it as parameter or use SQL functions to convert your constant from native encoding to Unicode. All data passed to the patched DBD::ODBC for C, C, C, and C is treated as Unicode, even if it is not Unicode. F should check the UTF8 flag of the scalar and pass a value different from C as first argument to C. The problem is to know what encoding is used for the data in the scalar. 
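To make the intended behaviour concrete, here is a minimal sketch of storing and fetching a Unicode string with the patched driver. It is not part of the original patch: the table name is made up and the C<NVARCHAR> column type assumes MS SQL Server. The connection relies on the usual C<DBI_DSN>, C<DBI_USER> and C<DBI_PASS> environment variables, as the other example scripts in this distribution do.

    use strict;
    use DBI;

    my $dbh = DBI->connect() or die "Can't connect";
    $dbh->{RaiseError} = 1;

    # NVARCHAR holds 16 bit Unicode on MS SQL Server
    eval { $dbh->do("DROP TABLE unicode_demo") };
    $dbh->do("CREATE TABLE unicode_demo (txt NVARCHAR(50))");

    my $smiley = "\x{263A}";   # WHITE SMILING FACE, a Unicode string constant
    my $sth = $dbh->prepare("INSERT INTO unicode_demo (txt) VALUES (?)");
    $sth->execute($smiley);    # parameter data is converted to 16 bit Unicode

    my ($back) = $dbh->selectrow_array("SELECT txt FROM unicode_demo");
    print "UTF8 flag is ", (utf8::is_utf8($back) ? "set" : "not set"), "\n";
    print "round trip ok\n" if $back eq $smiley;

    $dbh->disconnect;
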
Binding of unicode output parameters is untested (I don't need them) and likely to fail. The patched DBD::ODBC may fail to compile on non-Win32 platforms. It needs a header file named F defining at least the following: =over 4 =item A C data type capable of storing a single Unicode character. Microsoft uses C in F, and C in F. =item C C for wide characters. Microsoft declares this function in both F and F. =item C C for wide characters, returns character count, not bytes. Microsoft declares this function in both F and F. =item WideCharToMultiByte() and MultiByteToWideChar() functions Encoding converter functions. WideChar in this context means the native Unicode representation of the ODBC API (UCS-2 or UTF-16LE for Windows), MultiByte in this context means Perls native Unicode representation (UTF-8). Microsoft declares the two functions in F. The C argument tells the function that the MultiByte string is in UTF-8 encoding. =back =head2 Technical This patch adds support for C, C, C, and C. Strings written to columns that are reported as C, C, or C are automatically converted to 16 bit Unicode using the Windows API function C, return values reported as C, C, C, or C are converted back to 8 bit Unicode (UTF-8) using the Windows API function C and have Perl's UTF8 flag set except for empty strings. =head2 Tests This patch adds two new tests, F and F. Test 40 checks that Unicode strings can be entered as bound parameters and are returned unmodified. Test 41 creates a table, writes and reads unicode data using various bind variants. When using Oracle, the empty string test in F is skipped because Oracle converts empty strings to NULL in this situation. I had to add C, C, C to F, because Oracle in the setup described above returns Unicode more often than expected. I added F, that exports two utility functions for unicode string tests: =over 4 =item C Dumps a string, indicating its Unicode flag, length and all characters in ASCII notation. =item C Compares two strings that may be contain Unicode, and calls C or C. =back =head2 See also =over 4 =item * Microsoft ODBC documentation =item * Microsoft API documentation =item * http://www.unicode.org/ =item * DBI =item * DBD::ODBC =item * DBD::Oracle =item * DBD::Pg =back DBD-ODBC-1.61/Makefile.PL0000644000175000017500000012676413614543612014104 0ustar martinmartin#!/usr/local/bin/perl -w # # You may distribute under the terms of either the GNU General Public # License or the Artistic License, as specified in the Perl README file. # # Test dependencies on CPAN: # http://cpandeps.cantrell.org.uk/?module=DBD::ODBC;perl=latest # ## no critic (ProhibitMagicNumbers RequireInterpolationOfMetachars) ## no critic (RequireExtendedFormatting RequireCheckingReturnValueOfEval) ## no critic (RequireCarping ProhibitParensWithBuiltins RequireBriefOpen) ## no critic (RequireLocalizedPunctuationVars ProhibitBacktickOperators) # keeps reporting this even though I am not matching dots: ## no critic (RequireDotMatchAnything) # I am not changing all these - would take too long to check each one ## no critic (RequireLineBoundaryMatching) use strict; BEGIN { require 5.004 } # 5.004 is required for Win32 use Config; use ExtUtils::MakeMaker 5.16, qw(&WriteMakefile $Verbose prompt); use File::Basename; use Getopt::Long; use File::Spec; use English qw( -no_match_vars ); use warnings; $OUTPUT_AUTOFLUSH = 1; print <<"EOT"; ********** \tRemember to actually *READ* the README file! 
\tAnd re-read it if you have any problems.\n ********** EOT { # some useful info when debugging problems print "OSNAME: $OSNAME\n"; my @envs = qw(LANG ODBCHOME LD_LIBRARY_PATH DBROOT WINDIR II_SYSTEM DBD_ODBC_UNICODE); foreach (@envs) { print "$_: ", ($ENV{$_} ? $ENV{$_} : ''), "\n"; } print "Perl: $]\n"; print "ExtUtils::MakeMaker: $ExtUtils::MakeMaker::VERSION\n"; } # the following now redundant since we found constants (see end) which allows # us to override the constants. ###### ###### if INC is set on the command line ExtUtils::MakeMaker will not override it ###### or allow us to add to it so you can never find the DBI header files. ###### It is pointless to continue ###### see http://www.mail-archive.com/makemaker@perl.org/msg02680.html ###### see http://www.nntp.perl.org/group/perl.dbi.users/2008/09/msg33278.html ###### http://www.perlmonks.org/?node_id=714150 #####my $INC_argc; #####for (my $n = 0; $n <= $#ARGV; $n++) { ##### if ($ARGV[$n] =~ /^INC=/) { ##### $INC_argc = $n; ##### print "\nCannot generate successful Makefile - " . ##### "INC is set on the command line.\n\n"; ##### prompt("Press return to see possible solutions:"); ##### print <<"EOT"; ##### #####INC has been set on the command line as: $ARGV[$n] ##### #####and there is no way this Makefile.PL can add the path to the DBI #####header files to INC. If you manually added INC to the command line #####remove it. If you are building from the CPAN shell perhaps your #####makepl_arg is set to include INC (as it is with Strawberry Perl). You #####need to remove INC from makepl_arg with: ##### #####cpan> o conf ##### #####Look for makepl_arg and set makepl_arg to whatever text it is minus the INC #####setting. You set makepl_arg with: ##### #####cpan> o conf makepl_arg 'text of makepl_arg - INC' ##### #####Alternatively, if you are using Strawberry perl you can just install #####DBD::ODBC outside of the CPAN shell. ##### #####If you cannot remove INC from the command line then you will need to hand #####edit the generated Makefile. Search it for : ##### ##### # MakeMaker 'CONFIGURE' Parameters: ##### # INC => q[-I. -I/some_path/DBI] ##### #####then add the -I/some_path/DBI to INC where it is set later in the #####Makefile. ##### #####EOT ##### #exit 0; ##### } #####} my %opts = ( ## no critic (RequireInterpolationOfMetachars) NAME => 'DBD::ODBC', VERSION_FROM => 'ODBC.pm', # See note below on CONFIGURE. This used to work when we could rely on # CONFIGURE being after PREREQ_PM but that is not the case now so the # line below does nothing since 6.33 of MakeMaker. BUILD_REQUIRES => { "Test::Simple" => 0.90, # actually Test::More pkg in T::S dist, "Test::Output" => 1.031 }, PREREQ_PM => { "DBI" => 1.609 }, clean => { FILES => 'ODBC.xsi dbdodbc.h' }, dist => { DIST_DEFAULT => 'clean distcheck tardist', PREOP => '$(MAKE) -f Makefile.old distdir', COMPRESS => 'gzip -v9', SUFFIX => 'gz' }, OBJECT => '$(O_FILES)', DEFINE => q{}, ); # just save the open suse guys patching the following: if ($ENV{RPM_OPT_FLAGS}) { $opts{OPTIMIZE} = $ENV{RPM_OPT_FLAGS}; } my $eumm = $ExtUtils::MakeMaker::VERSION; $eumm =~ tr/_//d; $opts{LICENSE} = 'perl' if $eumm >= 6.3002; $opts{NO_META} = 1 if $eumm >= 6.10; if ($eumm >= 5.43) { $opts{AUTHOR} = 'Tim Bunce, Jeff Urlwin, Martin J. 
Evans mailto:dbi-users@perl.org'; $opts{ABSTRACT} = 'ODBC driver for the DBI module.'; $opts{CAPI} = 'TRUE' if $Config{archname} =~ /-object\b/i; # Never set PREREQ_FATAL to true - it is flawed # - see http://wiki.cpantesters.org/wiki/CPANAuthorNotes #$opts{PREREQ_FATAL} = 1; # See # http://www.mail-archive.com/cpan-testers-discuss%40perl.org/msg00076.html # In ExtUtils::MakeMaker 6.32 and earlier CONFIGURE was run after PREREQ_PM # so we could safely require DBI::DBD here and PREREQ_PM would fail first # if DBI was not installed. Since 6.33 CONFIGURE is run before PREREQ_PM # so now the require below fails and if we do not exit 0 without generating # a Makefile cpan-testers will fail us if DBI is not found. $opts{CONFIGURE} = sub { eval {require DBI::DBD;}; if ($@) { warn $@; exit 0; } else { my $dbi_arch_dir = DBI::DBD::dbd_dbi_arch_dir(); if (exists($opts{INC})) { return {INC => "$opts{INC} -I$dbi_arch_dir"}; } else { return {INC => "-I$dbi_arch_dir"}; } } }; } if ($eumm >= 5.48) { $opts{PREREQ_PRINT} = 1; } my $opt_g = 0; # build debug my $opt_o = q{}; # odbc home overrides ODBCHOME my $opt_u = undef; # build unicode version my $opt_e = undef; # easysoft my $opt_x = undef; # prefer unixODBC over iODBC my $opt_w = undef; # enable -Wall (gcc only) $opt_u = 1 if $ENV{DBD_ODBC_UNICODE}; my @options = ("g!" => \$opt_g, "o=s" => \$opt_o, "u!" => \$opt_u, "e!" => \$opt_e, "x!" => \$opt_x, "w!" => \$opt_w); my %options = @options; Getopt::Long::GetOptions(@options) or die "Invalid arguments\n"; print "Command line options:\n", (map {" $_ = " . (defined(${$options{$_}}) ? ${$options{$_}} : 'undef') . "\n"} keys %options), "\n"; if (($ENV{LANG} || q{}) =~ m/utf-?8/i) { print <<"EOT"; Your LANG environment variable is set to "$ENV{LANG}"\a This is known to cause problems in some perl installations - even stopping this Makefile.PL from running without errors. If you have problems please try re-running with LANG unset or with the utf part of LANG removed. EOT sleep 4; } if ($Config{useithreads}) { print <<'EOT'; You are using a Perl configured with threading enabled. Please read the warnings in DBI about this. EOT if ($OSNAME ne 'MSWin32') { print <<'EOT'; You should also be aware that on non-Windows platforms ODBC drivers come in two forms, thread-safe and non-thread-safe drivers and you may need to make sure you are using the right one. EOT } # see rt 46944 for why the following was suggested to be commented # out which I rejected as PERL_MM_USE_DEFAULT can be used prompt("Press return to continue..."); } print "Overriding ODBC Directory with command line option: $opt_o\n" if $opt_o ; if ($opt_g) { print "Setting debug options!\n"; if ($OSNAME eq 'MSWin32') { $opts{OPTIMIZE} = '/Zi'; } else { $opts{OPTIMIZE} = '-g -O0'; } } if ($opt_w) { $opts{CCFLAGS} = '-Wint-to-pointer-cast -fno-strict-aliasing -Wall -Werror=format-security'; } if (defined($opt_u) && $opt_u) { $opts{DEFINE} .= ' -DWITH_UNICODE'; require 5.008001; } my @known_drivers = sort { $a cmp $b } ( 'Microsoft ODBC', 'unixodbc', 'iodbc', 'empress', 'intersolve', 'sapdb', 'adabas', 'udbc', 'solid', 'informix', 'ingrescli', ## clach04 Ingres CLI name? ); my $odbchome_specified; $odbchome_specified = 1 if defined($opt_o) || defined($ENV{ODBCHOME}); # # create the dbdodbc.h file with all the required includes per driver or # driver manager # my $sqlhfh; open($sqlhfh, q/>/, 'dbdodbc.h') || die "Can't open dbdodbc.h: $!\n"; print {$sqlhfh} qq{/* Do not edit this file. 
It is automatically written by Makefile.PL.\n}; print {$sqlhfh} qq{ Any changes made here will be lost. \n*/\n\n}; print {$sqlhfh} qq{#undef WORD /* from perly.y */\n}; if ($OSNAME eq 'MSWin32') { my $extrainc = q{}; $extrainc = ";$Config{incpath}\\mfc" if $Config{cc} eq 'bcc32'; $opts{SKIP} = ['processPL']; if (!defined($opt_u) || $opt_u) { $opts{DEFINE} .= " -DWITH_UNICODE"; require 5.008001; } # stop all those strcpy etc warnings: $opts{DEFINE} .= " -D_CRT_SECURE_NO_DEPRECATE"; $opts{INC} = "$extrainc"; $opts{LIBS} = ["ODBC32.LIB"]; $opts{macro}->{EXTRALIB} = 'ODBC32.LIB'; print {$sqlhfh} "#include \n"; print {$sqlhfh} <<'EOT'; #ifdef WIN32 # ifndef _WIN64 # ifndef SQLLEN # define SQLLEN SQLINTEGER # endif # ifndef SQLULEN # define SQLULEN SQLUINTEGER # endif # ifndef SQLSETPOSIROW # define SQLSETPOSIROW SQLUSMALLINT # endif # endif #endif EOT print {$sqlhfh} "#include \n#include \n#include \n"; } elsif ($opt_e) { my $odbchome = "/usr/local/easysoft/unixODBC"; my $odbclibdir = "/usr/local/easysoft/unixODBC/lib"; my $odbcincdir = "/usr/local/easysoft/unixODBC/include"; $opts{INC} = "-I. -I$odbcincdir"; $opts{DEFINE} .= "-DSIZEOF_LONG=8 -DBUILD_REAL_64_BIT_MODE"; my $soext = $Config{so}; # extension for share objects my $dlext = $Config{dlext}; # extension for dynamically loaded modules my $arext = $Config{lib_ext}; # this is _a now, library extension my @libs = glob "$odbclibdir/libodbc.*"; my @ilibs = grep { /\.($soext|$dlext|a)$/ } @libs; if (scalar(@ilibs) == 0) { die "Cannot find unixODBC"; } my $ilibpath = $ilibs[0]; # if both .so and .a, pick based on LINKTYPE? my $ilibname = basename($ilibpath); if ($ilibname =~ /^odbc/) { # no "lib" prefix $opts{LIBS} = q{}; $opts{dynamic_lib} = { OTHERLDFLAGS => "$ilibpath" }; } else { if ($ilibname =~ /^lib(odbc[^.]*?)\.\w+$/) { # remove lib prefix and .so suffix so "-l" style link can be used $ilibname = $1; $opts{LIBS} = "-L$odbclibdir -l$ilibname"; } else { # cannot use "-l" style link so specify pull path $opts{LIBS} = q{}; $opts{dynamic_lib} = { OTHERLDFLAGS => "$ilibpath" }; } warn "Warning: LD_LIBRARY_PATH doesn't include $odbclibdir\n" unless (exists($ENV{LD_LIBRARY_PATH}) && ($ENV{LD_LIBRARY_PATH} =~ /\Q$odbclibdir/)); } print {$sqlhfh} qq{#include \n}; print {$sqlhfh} qq{#include \n}; print {$sqlhfh} qq{#include \n}; print {$sqlhfh} qq{#include \n}; } else { my $myodbc = q{}; my $myodbc_version = -1; # cannot believe the following still works as $odbchome is checked later # to be a directory - so commented out - there are other ways to do # this anyway # for Adabas #$ENV{ODBCHOME} = $ENV{DBROOT} # if $ENV{DBROOT} && -f "$ENV{DBROOT}/lib/odbclib.a"; print "Overriding ODBC Directory with command line option: $opt_o\n" if $opt_o ; my $odbchome= $opt_o || $ENV{ODBCHOME}; my ($odbcincdir, $odbclibdir); $odbchome = VMS::Filespec::unixify($odbchome) if $OSNAME eq 'VMS'; # if we haven't got odbchome set try and find a driver, driver manager. if (!$odbchome) { if ($ENV{WINDIR} && $OSNAME eq 'cygwin') { ## per patches from Teun Burgers #my $tmp_odbchome = $ENV{WINDIR}; #$tmp_odbchome =~ s/^([A-Za-z]):*$/\/\/$1/; #$tmp_odbchome =~ s/\\/\//g; #$odbchome = $tmp_odbchome if (-e "$tmp_odbchome/odbc.ini") chomp($odbchome = `cygpath \$WINDIR`); } elsif (-f '/opt/sapdb/interfaces/odbc/lib/libsqlod.a') { $odbchome = '/opt/sapdb/interfaces/odbc/'; } } if ($OSNAME eq 'cygwin'){ #Trade cygwin as MSWin. 
my $extrainc = q{}; $extrainc = ";$Config{incpath}\\mfc" if $Config{cc} eq 'bcc32'; $opts{SKIP} = ['processPL']; if (!defined($opt_u) || $opt_u) { $opts{DEFINE} .= " -DWITH_UNICODE"; require 5.008001; } $opts{INC} = "$extrainc"; $opts{LIBS} = ["ODBC32.LIB"]; $opts{macro}->{EXTRALIB} = 'ODBC32.LIB'; print {$sqlhfh} "#include \n"; print {$sqlhfh} "#include \n#include \n#include \n"; } elsif ($OSNAME !~ /MSWin/ ) { # look for Ingres' driver manager first on the basis of if you # have ingres you probably want to use it. Also Ingres ships with # libs that look like unixODBC/iODBC if ($ENV{II_SYSTEM}) { my $home = File::Spec->catdir($ENV{II_SYSTEM}, 'ingres'); my $inc = File::Spec->catdir($home, 'files'); if ((-f File::Spec->catfile($inc, 'sql.h')) && (-f File::Spec->catfile($home, 'bin', 'iiodbcadmin'))) { $odbchome = File::Spec->catdir($ENV{II_SYSTEM}, 'ingres'); $odbclibdir = File::Spec->catdir($odbchome, 'lib'); $odbcincdir = $inc; $myodbc = 'ingrescli'; } } # try and find unixODBC's odbc_config binary if (!$myodbc) { ($myodbc, $myodbc_version, $odbchome, $odbcincdir, $odbclibdir) = unixodbc_config($odbchome); } if (!$myodbc) { # try and find iODBC's iodbc_config binary ($myodbc, $myodbc_version, $odbchome, $odbcincdir, $odbclibdir) = iodbc_config($odbchome); } if (!$odbchome) { print "odbc_config not found - " . "ok, there are other things I can do\n"; print "Still trying to guess ODBCHOME - looking for headers now\n"; $odbchome = find_dm_hdr_files(); } } unless ($odbchome) { print <<'EOT'; The DBD::ODBC module needs to link with an ODBC 'Driver Manager'. (The Driver Manager, in turn, needs one or more database specific ODBC drivers. The DBD::ODBC module does _not_ include any ODBC drivers!) You need to indicate where your ODBC Driver Manager is installed. You can do this by: o setting the ODBCHOME environment variable o running 'perl Makefile.PL -o odbcdir' o adding path to odbc_config/iodbc_config to PATH If you do not have an ODBC Driver Manager you should try to get hold of the unixODBC packages for your system or build it from source (see http://www.unixodbc.org). If you install driver manager packages you need to include the "XXX-dev" package which includes the C header files. EOT # stop cpan testers from reporting a failure when a driver manager # library is not installed. Do not know if Devel::CheckLib (when # it is released) is going to help here. # see http://www.mail-archive.com/cpan-testers-discuss%40perl.org/msg00043.html exit 0; } die "odbc home ($odbchome) does not refer to a directory.\n" unless -d $odbchome; warn "Using ODBCHOME $odbchome\n"; # $odbcincdir and $odbclibdir will only be set at this point if we # found odbc_config/iodbc_config - otherwise we only have a $odbchome # either from -o, ODBCHOME or by finding sql*.h header files somewhere. if (!$odbcincdir) { $odbcincdir = File::Spec->catdir($odbchome, 'include'); } if (!$odbclibdir) { $odbclibdir = File::Spec->catdir($odbchome, 'lib'); } $opts{INC} = "-I."; # cygwin patch $opts{INC} .= " -I/usr/include/w32api" if $OSNAME eq 'cygwin'; # TO_DO all this needs to move until later # my $lib_d1 = "$odbchome/lib"; # my $lib_d2 = "$odbchome/dlls"; # my $libs = "odbc"; # $opts{LIBS} = " -L$lib_d1 -R$lib_d1 -L$lib_d2 -R$lib_d2 -l$libs"; my $soext = $Config{so}; # extension for share objects my $dlext = $Config{dlext}; # extension for dynamically loaded modules my $arext = $Config{lib_ext}; # this is _a now, library extension # fix to avoid foo..ext on many systems. 
$arext =~ s/^\.//; # Try to work out which driver manager is being used. # # NOTE: if $myodbc is already set we found a driver manager config binary # above and have already set $odbchome, $odbcincdir and $odbclibdir. # # NOTE: we look for iodbc first because both it and unixODBC supply a # libodbc.xx but only iodbc supplies a libiodbc.xx. As a result the only # reliable way of telling one from the other is to look for libiodbc.xx # first. # NOTE: -x says we prefer unixODBC over iODBC # This is to cater for those insane situations when someone had libiodbc # and unixODBC binary packages installed but unixODBC dev package # e.g. some people on Debian/Ubuntu # # PERL_DBD_ODBC_PREFER_UNIXODBC added for Caelum/Rafael for automated # installs/upgrades from cpan clients if (!$myodbc && ($opt_x || $ENV{PERL_DBD_ODBC_PREFER_UNIXODBC})) { ($myodbc, $odbclibdir) = find_unixodbc($odbchome); } else { ($myodbc, $odbclibdir) = find_iodbc($odbchome) if !$myodbc; ($myodbc, $odbclibdir) = find_unixodbc($odbchome) if !$myodbc; } $myodbc = 'Microsoft ODBC' if (!$myodbc && (-e "$odbchome/system/odbc32.dll" or -e "$odbchome/system32/odbc32.dll" or -e "$odbchome/odbc32.dll")); $myodbc = 'empress' if !$myodbc && glob "$odbchome/lib/libempodbc.*"; $myodbc = 'intersolve' if !$myodbc && -f "$odbchome/include/qeodbc.h"; $myodbc = 'sapdb' if !$myodbc && -f "$odbchome/lib/libsqlod.$arext"; $myodbc = 'adabas' if (!$myodbc && $ENV{DBROOT} && ($odbchome eq $ENV{DBROOT}) && -f "$odbchome/lib/odbclib.$arext"); $myodbc = 'udbc' if !$myodbc && -f "$odbchome/lib/libudbc.$arext"; $myodbc = 'solid' if !$myodbc && -f "$odbchome/lib/libsolcli.$dlext"; # JL 2002-12-16: This test is accurate on Unix (Solaris 7) with IBM # Informix ClientSDK 2.80.UC1, which includes IBM Informix CLI # v3.81.000, an ODBC 3.x driver. # NB: The correct value for $ODBCHOME is $INFORMIXDIR. $myodbc = 'informix' if !$myodbc && -f "$odbchome/lib/cli/libifcli.$dlext"; if (!$myodbc) { local($LIST_SEPARATOR) = ", "; my($list) = "@known_drivers"; $list =~ s/^(.{30,70})\s/$1\n\t/gmo; die qq{I cannot find an ODBC driver manager that I recognize.\n...And I know about these drivers:\n$list\n}; } warn "\nThis looks like a $myodbc type of driver manager.\n"; # some specific checks for incompatibilities if (defined($opt_u) && $opt_u) { if (-e File::Spec->catfile($odbcincdir, 'sql.h')) { my $fh; open($fh, q/; my @found = grep {/iODBC driver manager/i} @lines; if (scalar(@found)) { die "\n\nDBD::ODBC does not support unicode with iODBC and this looks like iODBC. 
The iODBC driver manager expects wide characters to be 4 bytes long and DBD::ODBC wants wide characters to be UTF16.\nEither\no) Rerun without the -u switch\no) complain to the producer of your ODBC driver manager\no) get another ODBC driver manager (like unixODBC).\n\n"; } close $fh or warn "Failed to close sql.h - $!"; } } if ($myodbc eq 'Microsoft ODBC') { print "\nBuilding for Microsoft under Cygwin\n"; $opts{LIBS} = "-L/usr/lib/w32api -lodbc32"; print {$sqlhfh} "#include \n"; print {$sqlhfh} "#include \n"; print {$sqlhfh} "#include \n"; print {$sqlhfh} "#include \n"; print {$sqlhfh} "#undef WIN32\n"; $opts{dynamic_lib} = {OTHERLDFLAGS => "-lodbc32"}; } elsif ($myodbc eq 'ingrescli') { $opts{INC} .= " -I$odbcincdir"; $opts{LIBS} = "-L$odbclibdir -liiodbc.1"; print {$sqlhfh} qq{typedef void* PTR;\n#include \n#include \n}; } elsif ($myodbc eq 'iodbc') { my @libs = glob "$odbclibdir/*iodbc*.*"; my @ilibs = grep { /\.($dlext|$soext|$arext)/ } @libs; if (scalar(@ilibs) == 0) { die "That's odd, I can't see any iodbc libs in $odbclibdir. " . "This is all I found:\n" . join(q{,}, @libs) . "\n" . "Perhaps you need to install the iODBC development " . "package, often called libiodbc-dev."; } # Note: we use DEFINE not INC for iODBC so we don't get its config.h my $ilibpath = $ilibs[0]; # if both .so and .a, pick based on LINKTYPE? my $ilibname = basename($ilibpath); $opts{DEFINE} .= " -I$odbcincdir"; if ($ilibname =~ /^iodbc/) { # no "lib" prefix $opts{LIBS} = q{}; $opts{dynamic_lib} = { OTHERLDFLAGS => "$ilibpath" }; } else { if ($ilibname =~ /^lib(iodbc[^.]*?)\.\w+$/) { # remove lib prefix and .so suffix so "-l" style link can be used $ilibname = $1; $opts{LIBS} = "-L$odbclibdir -l$ilibname"; } else { # cannot use "-l" style link so specify pull path $opts{LIBS} = q{}; $opts{dynamic_lib} = { OTHERLDFLAGS => "$ilibpath" }; } warn "Warning: LD_LIBRARY_PATH doesn't include $odbchome/lib\n" if (!defined($ENV{LD_LIBRARY_PATH})) || ($ENV{LD_LIBRARY_PATH} =~ /\Q$odbclibdir/); } if (-x "$odbchome/bin/iodbc-config") { my $cf = `$odbchome/bin/iodbc-config --cflags 2>&1`; if ($cf =~ /\-I/) { chomp $cf; $cf =~ s/\n//g; print qq/Adding iodbc_config --cflags "$cf" to CC line\n/; $opts{DEFINE} .= " $cf"; } } print {$sqlhfh} qq{#include \n}; print {$sqlhfh} qq{#include \n}; print {$sqlhfh} qq{#include \n}; } elsif ($myodbc eq 'unixodbc') { # if we find odbcinst, output useful info about this version of unixODBC # and store unixODBC version print "Looking for odbcinst\n"; if (-x "$odbchome/bin/odbcinst") { print " Found odbcinst in $odbchome/bin\n"; my $j = `$odbchome/bin/odbcinst -j 2>&1`; print " odbcinst -j reports:\n\n$j\n" if $j; if ($j =~ /^unixODBC ([\d\.]+).*/ ) { $myodbc_version = $1; } print "Please note these files as they are where you define your ODBC drivers and data sources.\n\n"; } else { print " odbcinst not found - ok, I can deal with that.\n"; } # if we find odbc_config add --cflags output to CC line print "Looking for odbc_config to get cflags\n"; if (-x "$odbchome/bin/odbc_config") { #my @args = qw(--prefix --include-prefix --lib-prefix --version --odbcversion); #for my $oca (@args) { # my $c = `$odbchome/bin/odbc_config $oca 2>&1`; # chomp $c; # if ($c) { # print "odbc_config $oca = $c\n"; # } #} #print "\n"; my $cf = `$odbchome/bin/odbc_config --cflags 2>&1`; if ($cf =~ /\-D/) { chomp $cf; $cf =~ s/\n//g; print qq/Adding odbc_config --cflags "$cf" to CC line\n/; $opts{DEFINE} .= " $cf"; } } else { print " odbc_config not found - ok\n"; } my ($ilibpath, $ilibname, @ilibs); my @libs = glob 
"$odbclibdir/libodbc.*"; # prefer dynamic linking if (@ilibs = grep /\.($soext)/, @libs) { # remove any shared object version off the end e.g. libodbc.so.2 $ilibpath = $ilibs[0]; $ilibpath =~ s/(.*\.$soext).*$/$1/; $ilibname = basename($ilibpath); if ($ilibname =~ /^lib(odbc[^.]*?)\.\w+$/) { # remove lib prefix and .so suffix so "-l" style link can be used $ilibname = $1; $opts{LIBS} = "-L$odbclibdir -l$ilibname"; } else { # cannot use "-l" style link so specify pull path $opts{LIBS} = q{}; $opts{dynamic_lib} = { OTHERLDFLAGS => "$ilibpath" }; } warn "Warning: LD_LIBRARY_PATH=", ($ENV{LD_LIBRARY_PATH} ? $ENV{LD_LIBRARY_PATH} : ""), " doesn't include $odbclibdir\n" unless (exists($ENV{LD_LIBRARY_PATH}) && ($ENV{LD_LIBRARY_PATH} =~ /\Q$odbclibdir/)); } elsif (@ilibs = grep /\.($dlext)/, @libs) { $ilibpath = $ilibs[0]; $ilibpath =~ s/(.*\.$dlext).*$/$1/; $ilibname = basename($ilibpath); if ($ilibname =~ /^lib(odbc[^.]*?)\.\w+$/) { # remove lib prefix and .so suffix so "-l" style link can be used $ilibname = $1; $opts{LIBS} = "-L$odbclibdir -l$ilibname"; } else { # cannot use "-l" style link so specify pull path $opts{LIBS} = q{}; $opts{dynamic_lib} = { OTHERLDFLAGS => "$ilibpath" }; } } elsif (@ilibs = grep /\.($arext)$/, @libs) { $ilibpath = $ilibs[0]; $ilibname = basename($ilibpath); #$opts{LINKTYPE} = 'static'; #$opts{linkext} = {LINKTYPE => 'static'}; $opts{dynamic_lib} = { OTHERLDFLAGS => "$ilibpath" }; # you can build against a static unixODBC lib but it still needs # to dynamically load (dl_open) drivers so we need libltdl: $opts{LIBS} = "-lltdl"; } else { die "That's odd, I can't see any unixodbc libs in $odbchome.\n" . "This is all I found:\n" . join(q{,}, @libs) . "\n" . "Perhaps you need to install the unixODBC development " . "package, often called unixodbc-dev.\n"; } $opts{DEFINE} .= " -I$odbcincdir"; print {$sqlhfh} qq{#include \n}; print {$sqlhfh} qq{#include \n}; print {$sqlhfh} qq{#include \n}; print {$sqlhfh} qq{#include \n}; } elsif ($myodbc eq 'intersolve') { print {$sqlhfh} qq{#include \n}; if (-f "$odbcincdir/sql.h") { print "You seem to have the official header files.\n"; $opts{INC} .= " -I$odbcincdir"; print {$sqlhfh} qq{#include \n#include \n#include \n}; } else { # This is common on Solaris print "You don't seem to have the official header files,\n"; print "so I'll use the iODBC ones instead.\n"; $opts{INC} .= " -I$odbcincdir -Iiodbcsrc"; print {$sqlhfh} qq{#include \n#include \n}; } } elsif ($myodbc eq 'empress') { $opts{INC} .= " -I$odbcincdir"; print {$sqlhfh} qq{#include \n}; print {$sqlhfh} qq{#include \n#include \n}; $opts{LIBS} = "-L$odbclibdir -R$odbclibdir -lempodbc"; } elsif ($myodbc eq 'sapdb') { print {$sqlhfh} "#include \n"; print {$sqlhfh} "#include \n"; print {$sqlhfh} "#include \n"; print {$sqlhfh} "#define HENV SQLHENV\n"; print {$sqlhfh} "#define HDBC SQLHDBC\n"; print {$sqlhfh} "#define HSTMT SQLHSTMT\n"; print {$sqlhfh} "#define DBD_ODBC_NO_SQLDRIVERCONNECT\n"; print {$sqlhfh} qq{#define DBD_ODBC_NO_DATASOURCES\n}; $opts{INC} .= " -I$odbchome/incl"; $opts{LDFROM} = "\$(OBJECT) $odbchome/lib/libsqlod.a"; } elsif ($myodbc eq 'adabas') { print {$sqlhfh} "#define FAR \n#define EXPORT \n#define CALLBACK \n"; print {$sqlhfh} "#include \n"; print {$sqlhfh} "#include \n"; print {$sqlhfh} "#include \n"; $opts{INC} .= " -I$odbchome/incl"; $opts{LIBS} = "-L$odbclibdir -lsqlrte -lsqlptc"; $opts{LDFROM} = "\$(OBJECT) $odbclibdir/odbclib.a"; } elsif ($myodbc eq 'udbc') { print {$sqlhfh} qq{#include \n}; $opts{INC} .= " -I$odbcincdir"; $opts{LIBS} = 
"-L$odbclibdir -R$odbclibdir -ludbc"; } elsif ($myodbc eq 'solid') { $opts{INC} .= " -I$odbcincdir"; $opts{LIBS} = "-L$odbclibdir -lsolcli"; # Solid does not support DataSources print {$sqlhfh} qq{#define DBD_ODBC_NO_DATASOURCES\n}; # Solid does not support DataSources print {$sqlhfh} qq{#define DBD_ODBC_NO_SQLDRIVERCONNECT\n}; print {$sqlhfh} qq{#include \n}; } elsif ($myodbc eq 'informix') { # JL 2002-12-16: See comments above for environment details. $opts{INC} = "-I$odbchome/incl/cli $opts{INC}"; $opts{LIBS} = "-L$odbchome/lib/cli -lifcli -lifdmr"; $opts{DEFINE} .= " -DNO_WIN32"; # Applies to Unix only, of course print {$sqlhfh} qq{#include \n}; print {$sqlhfh} qq{#include \n}; } else { print <<'EOT'; *** WARNING *** Unknown driver or driver manager. Using default build process. This will almost certainly fail at some point. In which case you will need to edit/hack the Makefile.PL to suit your needs. (Specifically to locate your odbc library and header files.) EOT print {$sqlhfh} qq{#include \n#include \n}; } } print {$sqlhfh} qq{\n}; print {$sqlhfh} qq{#include "fixup_c.h"\n}; print {$sqlhfh} qq{\n}; close($sqlhfh) or die "Failed to close dbdodbc.h - $!"; print "\n"; if ($OSNAME eq 'darwin') { $opts{LD} = $Config{ld} . ' -framework CoreFoundation'; # some older versions of darwin had a problem with iODBC which leads to # Symbol not found: _SQLGetPrivateProfileString # SQLGetPrivateProfileString is in libiodbcinst.a my $osver = `uname -r`; if ($osver && ($osver =~ /^8/)) { $opts{LIBS} .= ' -L/usr/lib -liodbcinst'; } } my $rv = WriteMakefile(%opts); local($WARNING)=0; print "Warning: not all required environment variables are set.\n" unless ($ENV{DBI_DSN} && $ENV{DBI_USER} && $ENV{DBI_PASS}); print "Warning: DBI_DSN ($ENV{DBI_DSN}) doesn't start with 'dbi:ODBC:'\n" if ($ENV{DBI_DSN} && $ENV{DBI_DSN} !~ m/^dbi:ODBC:/); print "\n"; if (!exists($ENV{DBI_DSN}) || !exists($ENV{DBI_USER}) || !exists($ENV{DBI_PASS})) { print "Warning: Will not be able to run tests as you have not defined\n", "all of DBI_DSN, DBI_USER and DBI_PASS environment variables.\n"; } else { print <<"EOT"; The DBD::ODBC tests will use these values for the database connection: DBI_DSN=$ENV{DBI_DSN} e.g. dbi:ODBC:demo DBI_USER=$ENV{DBI_USER} DBI_PASS=$ENV{DBI_PASS} EOT } # # find the files in @files in $path returning 1 if they all exist, 0 otherwise # sub files_exist { my ($path, @files) = @_; my $found = 1; foreach my $file (@files) { my $f = File::Spec->catfile($path, $file); if (! -f $f) { $found = 0; last; } } return $found; } # # Try and find out from odbc_config where unixODBC is # sub unixodbc_config { my $odbchome = shift; # may not be set print "Looking for odbc_config in : ", ($odbchome ? $odbchome : "nowhere"), "\n"; my ($inc, $lib, $home, $configbin, $odbc_config_v); # unixODBC - would have liked to use odbc_config but it did not # exist until 2.2.11 and it was broken wrt --cflags in 2.2.11/2.2.12 # i.e. 
--cflags did not include -I/xxx/yyy if ($odbchome) { $configbin = "$odbchome/bin/odbc_config"; print "Looking for odbc_config at $configbin\n"; $odbc_config_v = `$configbin --version 2>&1`; } if (!defined($odbc_config_v) || ($odbc_config_v !~ /^(\d\.)+/)) { print "Looking for odbc_config in (PATH) $ENV{PATH}\n"; $configbin = 'odbc_config'; $odbc_config_v = `$configbin --version 2>/dev/null`; if (!defined($odbc_config_v) || ($odbc_config_v !~ /^(\d\.)+/)) { print " odbc_config not found\n"; return; } if ($odbchome) { my $warning = <<"EOT"; ***** WARNING ***** You provided ODBCHOME ($odbchome) which has no odbc_config (not unusual for older unixODBCs) but we've found an odbc_config on your PATH. It is unlikely the odbc_config specifications are going to match your specified ODBCHOME so this script is going to ignore your specified ODBCHOME. If you don't like this do something to remove odbc_config from your PATH or ensure there is an odbc_config in your provided ODBCHOME. EOT warn $warning; prompt("Press return to continue..."); } } print " Found odbc_config (via $configbin) version $odbc_config_v\n"; my @hdrstofind = ('sql.h', 'sqlext.h', 'sqltypes.h'); push @hdrstofind, 'sqlucode.h' if $opt_u; $home = `$configbin --prefix 2>&1`; chomp $home; if (!defined($home)) { print " cannot find --prefix from odbc_config\n"; return; } print " odbc_config reports --prefix=$home\n"; # if we cannot find the --prefix dir someone perhaps someone has built # it in one place and moved it to another. Try path to odbc_config if (! -e "$home") { my $ocp = `which $configbin`; if ($ocp) { chomp $ocp; if ($ocp =~ /$configbin$/) { $ocp =~ s/bin\/$configbin$//; $home = $ocp; } } } $inc = `$configbin --include-prefix`; chomp $inc; print " odbc_config reports --include-prefix=$inc\n"; $lib = `$configbin --lib-prefix`; chomp $lib; print " odbc_config reports --lib-prefix=$lib\n"; my $found_hdrs = 0; # try with --include-prefix if (defined($inc) && (-e $inc) && files_exist($inc, @hdrstofind)) { print " ODBC INC dir set to $inc via odbc_config\n"; $found_hdrs++; } if (!$found_hdrs && (-e "$home")) { # try with --prefix + include $inc = File::Spec->catdir($home, 'include'); if ((-e "$inc") && files_exist($inc, @hdrstofind)) { print " ODBC INC dir set to $inc via odbc_config\n"; $found_hdrs++; } } # if (!$found_hdrs) { # my $ocp = `which $configbin`; # if ($ocp) { # chomp $ocp; # if ($ocp =~ /$configbin$/) { # $ocp =~ s/bin\/$configbin$//; # $inc = File::Spec->catdir($ocp, 'include'); # if ((-e "$inc") && files_exist($inc, @hdrstofind)) { # print " ODBC INC dir set to $inc from path to $configbin\n"; # $found_hdrs++; # } # } # } # } if (!$found_hdrs) { print " but cannot find header files " . join(',', @hdrstofind) . 
" in that path so ignoring\n"; print "NOTE: Have you installed the unixodbc-dev package\n"; return; } my $found_libdir = 0; if (-e "$lib") { # try with --lib-prefix print " ODBC LIB dir set to $lib via odbc_config\n"; $found_libdir++; } if (!$found_libdir && -e "$home") { # try with --prefix + lib $lib = File::Spec->catdir($home, 'lib'); if (-e "$lib") { print " ODBC LIB dir set to $lib from odbc_config\n"; $found_libdir++; } } # if (!$found_libdir) { # my $ocp = `which $configbin`; # if ($ocp) { # chomp $ocp; # if ($ocp =~ /$configbin$/) { # $ocp =~ s/bin\/$configbin$//; # $lib = File::Spec->catdir($ocp, 'lib'); # if (-e "$lib") { # print "Found lib dir $lib from path to $configbin"; # $found_libdir++; # } # } # } # } if (!$lib) { print " but cannot find lib dir so ignoring\n"; return; } return ('unixodbc', $odbc_config_v, $home, $inc, $lib); } # # Try and find out from iodbc_config where iODBC is # sub iodbc_config { my $odbchome = shift; # may not be set my ($home, $inc, $lib, $configbin, $iodbc_config_v); if ($odbchome) { $configbin = "$odbchome/bin/iodbc-config"; print "Looking for iodbc-config at $configbin\n"; $iodbc_config_v = `$configbin --version 2>&1`; } if (!defined($iodbc_config_v) || ($iodbc_config_v !~ /^(\d\.)+/)) { print "Looking for iodbc-config in PATH $ENV{PATH}\n"; $configbin = 'iodbc-config'; $iodbc_config_v = `$configbin --version 2>/dev/null`; if (!defined($iodbc_config_v) || ($iodbc_config_v !~ /^(\d\.)+/)) { print " iodbc_config not found\n"; return; } } print " Found iodbc-config (via $configbin) version $iodbc_config_v\n"; my $iodbc_ini = `$configbin --odbcini 2>&1`; print " ODBC data sources should be added to $iodbc_ini\n" if ($iodbc_ini); my $iodbc_instini = `$configbin --odbcinstini 2>&1`; print " ODBC drivers should be added to $iodbc_instini\n" if ($iodbc_instini); my @hdrstofind = ('sql.h', 'sqlext.h', 'sqltypes.h'); push @hdrstofind, 'sqlucode.h' if $opt_u; $home = `$configbin --prefix 2>&1`; if (!defined($home)) { print " cannot find --prefix from iodbc_config\n"; return; } chomp $home; print " iodbc-config reports --prefix=$home\n"; $inc = File::Spec->catdir($home, 'include'); $lib = File::Spec->catdir($home, 'lib'); my $found_hdrs = 0; if ((defined($inc)) && (-e $inc) && files_exist($inc, @hdrstofind)) { print " ODBC INC dir set to $inc from iodbc-config\n"; $found_hdrs++; } elsif (-e $home) { # try with --prefix + include $inc = File::Spec->catdir($home, 'include'); if (defined($inc) && (-e $inc) && files_exist($inc, @hdrstofind)) { print " ODBC INC dir set to $inc from iodbc_config\n"; $found_hdrs++; } } if (!$found_hdrs) { print " but cannot find header files " . join(',', @hdrstofind) . " in that path so ignoring\n"; print "NOTE: Have you installed the libiodbc-dev package.\n"; return; } if (-e $lib) { print " ODBC LIB dir set to $lib from iodbc_config/lib\n"; } else { print " but cannot find liob dir so ignoring\n"; return; } return ('iodbc', $iodbc_config_v, $home, $inc, $lib); } # # Try and find ODBC driver manager header files in general areas # sub find_dm_hdr_files { my ($home, $inc, $lib); my @hdrstofind = ('sql.h', 'sqlext.h', 'sqltypes.h'); push @hdrstofind, 'sqlucode.h' if $opt_u; my @paths = ('/usr', '/usr/local', '/usr/pkg', '/usr/local/easysoft/unixODBC'); unshift @paths, $opt_o if $opt_o; unshift @paths, $ENV{ODBCHOME} if $ENV{ODBCHOME}; foreach my $dir(@paths) { my $path = File::Spec->catdir($dir, 'include'); print " trying $path\n"; if (files_exist($path, @hdrstofind)) { print " Found " . join(', ', @hdrstofind) . 
" in $path\n"; $home = $dir; return $home; } } print <<'EOT'; I cannot find driver manager header files. Perhaps you need to install the unixodbc-dev package or the iodbc-dev package EOT return; } sub find_iodbc { my $home = shift; # will be specified odbc home or one we've found my @dirs; # start with specified dir if there was one push @dirs, "$home/lib" if defined($opt_o) || defined($ENV{ODBCHOME}); # look in perl's libspath as it is more likely to be compatible # (e.g., a lib64 dir) push @dirs, split(' ', $Config{libspath}); # add found odbc home if not added already if (defined($opt_o) || defined($ENV{ODBCHOME})) { push @dirs, "$home"; push @dirs, "$home/lib"; } for my $d(@dirs) { print " Looking for iODBC libs in $d\n"; my @found; if (@found = glob "$d/libiodbc*") { print " Found iODBC libs ", join(",", @found), " in $d\n"; return ('iodbc', $d); } } return; } sub find_unixodbc { my $home = shift; # will be specified odbc home or one we've found my @dirs; # start with specified dir if there was one push @dirs, "$home/lib" if defined($opt_o) || defined($ENV{ODBCHOME}); # look in perl's libspath as it is more likely to be compatible # (e.g., a lib64 dir) push @dirs, split(' ', $Config{libspath}); # add found odbc home if not added already push @dirs, "$home/lib" if !defined($opt_o) && !defined($ENV{ODBCHOME}); # for debian/ubuntu 12 thanks to Maestro(Geoff Darling, mitsi) for finding: push @dirs, "/usr/lib/i386-linux-gnu/"; for my $d(@dirs) { my @found; print " Looking for unixODBC libs in $d\n"; if (@found = glob "$d/libodbc*") { print " Found unixODBC libs ", join(",", @found), " in $d\n"; return ('unixodbc', $d); } } return; } # Following generates # Useless use of private variable in void context at Makefile.PL # but is required no warnings 'void'; $rv; # ==================================================================== package MY; use strict; use Config; use English; sub postamble { return DBI::DBD::dbd_postamble(@_); } sub const_cccmd { my $self = shift; local($_) = $self->SUPER::const_cccmd(@_); # inject the defined local ODBC before default include to ensure # the ODBC driver we want is first if ($OSNAME ne 'MSWin32') { s/-c/-c \$\(DEFINE\)/; } $_; } sub MY::post_constants { my ($self) = shift; ' # make Changes file available as installed pod docs "perldoc DBD::ODBC::Changes" inst_libdbdodbc = ' . File::Spec->catdir($self->{INST_LIB}, 'DBD/ODBC') . ' changes_pm = ' . File::Spec->catfile($self->{INST_LIB}, 'DBD/ODBC', 'Changes.pm') . ' # make FAQ file available as installed pod docs "perldoc DBD::ODBC::FAQ" inst_libdbdodbc = ' . File::Spec->catdir($self->{INST_LIB}, 'DBD/ODBC') . ' faq_pm = ' . File::Spec->catfile($self->{INST_LIB}, 'DBD/ODBC', 'FAQ.pm') . ' # make TO_DO file available as installed pod docs "perldoc DBD::ODBC::TO_DO" inst_libdbdodbc = ' . File::Spec->catdir($self->{INST_LIB}, 'DBD/ODBC') . ' todo_pm = ' . File::Spec->catfile($self->{INST_LIB}, 'DBD/ODBC', 'TO_DO.pm') . ' config :: $(changes_pm) $(faq_pm) $(todo_pm) @$(NOOP) $(changes_pm): Changes $(NOECHO) $(MKPATH) $(inst_libdbdodbc) $(NOECHO) $(RM_F) $(changes_pm) $(CP) Changes $(changes_pm) $(faq_pm): FAQ $(NOECHO) $(MKPATH) $(inst_libdbdodbc) $(NOECHO) $(RM_F) $(faq_pm) $(CP) FAQ $(faq_pm) $(todo_pm): TO_DO $(NOECHO) $(MKPATH) $(inst_libdbdodbc) $(NOECHO) $(RM_F) $(todo_pm) $(CP) TO_DO $(todo_pm) '; } sub constants { my $self = shift; require DBI::DBD; # The problem with stawberry perl is it sets INC on the command line # and that overrides INC in this Makefile unless we set it here. 
my $old_constants = $self->SUPER::constants(); my $new_constants; foreach my $line ( split(/\n/, $old_constants) ) { if ( $line =~ /^INC = .*strawberry.*/ ) { print qq(Strawberry Perl found; adjusting the INC variable;\n); $line = $line . q( -I) . DBI::DBD::dbd_dbi_arch_dir(); print qq(INC is now $line\n); } $new_constants .= $line . qq(\n); } return $new_constants; } # end. DBD-ODBC-1.61/README.informix0000644000175000017500000000200312254016117014611 0ustar martinmartinREADME for Informix CLI on Unix =============================== To get DBD::ODBC 1.01 to work on Solaris 7, there were a number of changes. To build DBD::ODBC 1.01 with the changes in place, the compilation sequence was: perl Makefile.PL -o $INFORMIXDIR This locates the CLI (ODBC) libraries in $INFORMIXDIR/lib/cli and the header files in $INFORMIXDIR/incl/cli. When testing, ensure that your database is a logged database; DBD::ODBC does not seem to be happy with unlogged databases with no transactional support. If you need to access such databases, you probably need to use DBD::Informix, which is acquainted with the peculiarities of Informix's different database logging modes. Non-Informix drivers such as DBD::ODBC probably can't be expected to oblige with the detailed changes necessary to handle them. Jonathan Leffler Date: 2002-12-16 Platform: Sun Sparc Ultra 10, Solaris 7. Perl: 5.8.0 GCC: 3.2.1 DBI: 1.32 hacked to mimic 1.33 ODBC: IBM Informix ClientSDK 2.80.UC1, CLI 3.81.0000. DBD-ODBC-1.61/README.adabas0000644000175000017500000000255112254016125014200 0ustar martinmartinI don't have Adabas and the contents of this file are now very old. If you can verify or update this file I'd greatly appreciate it. Date: Sat, 27 Sep 1997 17:43:13 +0200 From: Jochen Wiedmann Compiling the DBI sources for Adabas is fairly easy. First you have to set the usual environment variable "DBROOT", that points to the main Adabas installation directory, for example DBROOT=/opt/adabas-pe; export DBROOT (sh or bash) or setenv DBROOT=/opt/adabas-pe (csh or tcsh) Next point is to setup the environment variables that tell the DBI test scripts which DSN to use for the tests: DBI_DSN=dbi:ODBC:MYDB DBI_USER=demo DBI_PASS=demo (I've grabbed the appropriate settings for the widely spread personal edition.) Finally you have to create a file "/usr/spool/sql/config/odbc.ini" (the name is fixed, I did not choose it ...), which describes the connection parameters to use for DSN MYDB, for example [ODBC Data Sources] MYDB [MYDB] ServerDB=MYDB ServerNode=192.168.1.2 # Leave empty for local host I'd be happy to leave this away and use the dsn dbi:ODBC:servernode=192.168.1.2;serverdb=MYDB or something similar, but I didn't get this working. Perhaps someone at SAG can help? Finally do just the usual "perl Makefile.PL", "make", "make test" and "make install". DBD-ODBC-1.61/test_results.txt0000644000175000017500000001013312250310263015376 0ustar martinmartin====================================================================== t/01base................ok t/02simple..............ok 1/65# # Perl v5.9.5 built for MSWin32-x64-multi-thread # Using DBI 1.59 # Using DBD::ODBC 1.20 # Using DBMS_NAME 'Microsoft SQL Server' # Using DBMS_VER '09.00.3042' # Using DRIVER_NAME 'sqlncli10.dll' # Using DRIVER_VER '10.00.1049' # odbc_has_unicode 1 t/02simple..............ok t/03dbatt...............ok 1/29# # N.B. Some drivers (postgres/cache) may return ODBC 2.0 column names for the SQ LTables result-set e.g. 
TABLE_QUALIFIER instead of TABLE_CAT t/03dbatt...............ok t/05meth................ok t/07bind................ok t/08bind2...............ok t/09multi...............ok t/10handler.............ok t/12blob................ok t/20SqlServer...........ok t/30Oracle..............ok 3/5 skipped: various reasons t/40UnicodeRoundTrip....ok t/41Unicode.............ok t/pod-coverage..........ok 1/1# Test::Pod::Coverage 1.04 required for testing PO D coverage t/pod-coverage..........ok t/pod...................ok 3/3 skipped: various reasons t/rt_38977..............ok t/rt_39841..............ok 1/28# [Microsoft][SQL Server Native Client 10.0]Inval id character value for cast specification (SQL-22018) # 22018 # # NOTE: Your SQL Server ODBC driver has a bug which can describe parameters # in SQL using sub selects incorrectly. In this case a VARCHAR(8) parameter # is described as an INTEGER # # Checking you can work around bug in SQL Server ODBC Driver # Yes you can t/rt_39841..............ok 5/28 skipped: various reasons t/rt_39897..............ok t/rt_43384..............ok 7/9 skipped: various reasons All tests successful, 18 subtests skipped. Files=19, Tests=427, 11 wallclock secs ( 0.00 cusr + 0.00 csys = 0.00 CPU) ====================================================================== t/01base................ok t/02simple..............ok 1/65# # Perl 5.7.8 # osname=linux, osvers=2.6.9-22.0.2.elsmp, archname=i686-linux # Using DBI 1.607 # Using DBD::ODBC 1.21 # Using DBMS_NAME 'Microsoft SQL Server' # Using DBMS_VER '09.00.4035' # Using DRIVER_NAME 'esoobclient' # Using DRIVER_VER '02.00.0000' # odbc_has_unicode 0 t/02simple..............ok t/03dbatt...............ok 1/29# # N.B. Some drivers (postgres/cache) may return ODBC 2.0 column names for the SQLTables result-set e.g. TABLE_QUALIFIER instead of TABLE_CAT t/03dbatt...............ok t/05meth................ok t/07bind................ok t/08bind2...............ok t/09multi...............ok t/10handler.............ok t/12blob................ok t/20SqlServer...........ok 54/65# DBD::ODBC::st execute failed: [unixODBC][Microsoft][ODBC SQL Server Driver]Connection is busy with results for another hstmt (SQL-HY000) at t/20SqlServer.t line 202. # DSN=dbi:ODBC:DSN=XXX;MARS_Connection=yes; # # NOTE: You failed this test because your SQL Server driver # is too old to handle the MARS_Connection attribute. This test cannot # easily skip this test for old drivers as there is no definite SQL Server # driver version it can check. # t/20SqlServer...........ok 1/65 skipped: WARNING: driver does NOT support MARS_Connection t/30Oracle..............ok 3/5 skipped: Oracle tests not supported using Microsoft SQL Server t/40UnicodeRoundTrip....ok 61/62 skipped: Unicode-specific tests disabled - not a unicode build t/41Unicode.............ok 54/55 skipped: Unicode-specific tests disabled - not a unicode build t/pod-coverage..........ok 1/1# Test::Pod::Coverage 1.04 required for testing POD coverage t/pod-coverage..........ok t/pod...................ok 3/3 skipped: Test::Pod 1.00 required for testing POD t/rt_38977..............ok 6/11 skipped: Easysoft OOB t/rt_39841..............ok 25/28 skipped: not SQL Server ODBC or native client driver t/rt_39897..............ok t/rt_43384..............ok 7/9 skipped: Microsoft Access tests not supported using Microsoft SQL Server All tests successful, 160 subtests skipped. 
Files=19, Tests=430, 15 wallclock secs ( 1.17 cusr + 1.48 csys = 2.65 CPU) ====================================================================== DBD-ODBC-1.61/t/0000755000175000017500000000000013614770375012365 5ustar martinmartinDBD-ODBC-1.61/t/rt_39841.t0000755000175000017500000002047712254015243013745 0ustar martinmartin#!/usr/bin/perl -w -I./t # # Test fix for rt 39841 - problem with SQLDecribeParam in MS SQL Server # use Test::More; use strict; #use Data::Dumper; $| = 1; my $has_test_nowarnings = 1; eval "require Test::NoWarnings"; $has_test_nowarnings = undef if $@; my $tests = 28; $tests += 1 if $has_test_nowarnings; plan tests => $tests; # can't seem to get the imports right this way use DBI qw(:sql_types); #1 use_ok('ODBCTEST'); my $dbh; BEGIN { if (!defined $ENV{DBI_DSN}) { plan skip_all => "DBI_DSN is undefined"; } } END { if ($dbh) { eval { local $dbh->{PrintWarn} = 0; local $dbh->{PrintError} = 0; $dbh->do(q/drop table PERL_DBD_rt_39841a/); $dbh->do(q/drop table PERL_DBD_rt_39841b/); }; } Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings); } $dbh = DBI->connect(); unless($dbh) { BAIL_OUT("Unable to connect to the database $DBI::errstr\nTests skipped.\n"); exit 0; } $dbh->{RaiseError} = 1; my $dbms_name = $dbh->get_info(17); #2 ok($dbms_name, "got DBMS name: $dbms_name"); my $dbms_version = $dbh->get_info(18); #3 ok($dbms_version, "got DBMS version: $dbms_version"); my $driver_name = DBI::neat($dbh->get_info(6)); my ($ev, $sth); SKIP: { skip "not SQL Server", 25 if $dbms_name !~ /Microsoft SQL Server/; skip "not SQL Server ODBC or native client driver", 25 if ($driver_name !~ /SQLSRV32.DLL/oi) && ($driver_name !~ /sqlncli10.dll/oi) && ($driver_name !~ /SQLNCLI>DLL/oi); my $major_version = $dbms_version; eval { local $dbh->{PrintWarn} = 0; local $dbh->{PrintError} = 0; $dbh->do('drop table PERL_DBD_39841a'); $dbh->do('drop table PERL_DBD_39841b'); }; test_1($dbh); # 16 tests test_2($dbh); # 9 tests }; # # A bug in the SQL Server OBDC driver causes SQLDescribeParam to # report the parameter as an integer of column_size 10 instead of # a varchar of column size 10. Thus when you execute with 'bbbbbb' # SQL Server will complain that an unsupported conversion has occurred. # We can work around this by specifically telling DBD::ODBC to bind # as a VARCHAR. # The bug is due to SQL Server rearranging the SQL above to: # select a1 from PERL_DBD_38941a where 1 = 2 # and it should have run # select b2 from PERL_DBD_38941b where 1 = 2 # sub test_1 { $dbh = shift; my $sth; eval { $dbh->do('create table PERL_DBD_39841a (a1 integer, a2 varchar(20))'); $dbh->do('create table PERL_DBD_39841b (b1 double precision, b2 varchar(8))'); }; $ev = $@; #1 ok(!$ev, 'create test tables'); SKIP: { skip "Failed to create test table", 10 if ($ev); eval { $dbh->do(q/insert into PERL_DBD_39841a values(1, 'aaaaaaaaaa')/); $dbh->do(q/insert into PERL_DBD_39841b values(1, 'bbbbbbbb')/); }; $ev = $@; #2 ok(!$ev, "populate tables"); eval { $sth = $dbh->prepare(q/select b1, ( select a2 from PERL_DBD_39841a where a1 = b1 ) from PERL_DBD_39841b where b2 = ?/); }; $ev = $@; #3 ok(!$ev, 'prepare select'); SKIP: { # 13 skip 'cannot prepare SQL for test', 13 if $ev; eval { local $sth->{PrintError} = 0; $sth->execute('bbbbbb'); }; my $ev = $@; SKIP: { # 5 if ($ev) { diag($dbh->errstr); diag($dbh->state); if ($dbh->state eq '22018') { diag("\nNOTE: Your SQL Server ODBC driver has a bug which can describe parameters\n"); diag("in SQL using sub selects incorrectly. 
In this case a VARCHAR(8) parameter\n"); diag("is described as an INTEGER\n\n"); skip 'test_1 execute failed - bug in SQL Server ODBC Driver', 5; } else { skip 'test_1 execute failed with unexpected error', 5; } } #1 pass('test_1 execute'); #2 is($sth->{NUM_OF_PARAMS}, 1, 'correct number of parameters'); #diag(Dumper($sth->{ParamTypes})); #3 is($sth->{NUM_OF_FIELDS}, 2, 'fields in result-set'); my $count; eval { while($sth->fetchrow_array) { $count = 0 if !defined($count); $count++}; }; #4 ok(!$ev, "fetchrow_array"); #5 ok(!defined($count), "no rows returned"); }; SKIP: { # 8 skip "no bug found", 8 if !$ev; skip "unexpected error this test is not checking for", 8 if ($dbh->state ne '22018'); diag("Checking you can work around bug in SQL Server ODBC Driver"); eval { $sth->bind_param(1, 'bbbbbb', SQL_VARCHAR); $sth->execute; }; $ev = $@; if ($ev) { diag("No you cannot"); skip "Cannot work around bug", 4; } else { diag("Yes you can"); #1 is($sth->{NUM_OF_PARAMS}, 1, 'correct number of parameters'); #2 is($sth->{NUM_OF_FIELDS}, 2, 'fields in result-set'); #diag(Dumper($sth->{ParamTypes})); my $pv = $sth->{ParamValues}; #3 ok(defined($pv), "Parameter values"); SKIP: { skip "no parameter values", 3 if !$pv; #1 is(ref($pv), 'HASH', 'parameter value hash'); #2 ok(exists($pv->{1}), 'parameter 1 exists'); SKIP: { skip "no p1", 1 if !exists($pv->{1}); #1 is($pv->{1}, 'bbbbbb', 'parameter has right value'); }; }; my $count; eval { while($sth->fetchrow_array) { $count = 0 if !defined($count); $count++}; }; #4 ok(!$ev, "fetchrow_array"); #5 ok(!defined($count), "no rows returned"); } }; }; } eval { local $dbh->{PrintWarn} = 0; local $dbh->{PrintError} = 0; $dbh->do('drop table PERL_DBD_39841a'); $dbh->do('drop table PERL_DBD_39841b'); }; } # # Here SQL Server gets confused and rearranges the SQL to find out about # PERL_DBD_39841a.a2 when it should have returned information about # PERL_DBD_39841b.b2. This used to lead to DBD::ODBC binding p1 as # 'bbbbbbbbbbbbbbbbbbbb' but specifying a column size of 10 - hence # data truncation error. 
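#
# For reference, the application-level workaround exercised in test_1 above
# is to tell DBD::ODBC the real parameter type instead of trusting the
# driver's SQLDescribeParam result. A minimal sketch (illustrative only,
# assuming an affected SQL Server ODBC driver; not executed by this test):
#
#   use DBI qw(:sql_types);
#   $sth->bind_param(1, 'bbbbbb', SQL_VARCHAR);  # force a VARCHAR bind
#   $sth->execute;                               # avoids the 22018 cast error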
# sub test_2 { $dbh = shift; my $sth; eval { local $dbh->{PrintError} = 1; $dbh->do('create table PERL_DBD_39841a (a1 integer, a2 varchar(10))'); $dbh->do('create table PERL_DBD_39841b (b1 varchar(10), b2 varchar(20))'); }; $ev = $@; #1 ok(!$ev, 'create test tables'); SKIP: { # 8 skip "Failed to create test table", 8 if ($ev); eval { $dbh->do(q/insert into PERL_DBD_39841a values(1, 'aaaaaaaaaa')/); $dbh->do(q/insert into PERL_DBD_39841b values('aaaaaaaaaa', 'bbbbbbbbbbbbbbbbbbbb')/); }; $ev = $@; #1 1 ok(!$ev, "populate tables"); eval { $sth = $dbh->prepare(q/select b1, ( select a2 from PERL_DBD_39841a where a2 = b1 ) from PERL_DBD_39841b where b2 = ?/); }; $ev = $@; #1 2 ok(!$ev, 'prepare select'); SKIP: { # 6 skip 'cannot prepare SQL for test', 6 if $ev; eval { local $sth->{PrintError} = 0; $sth->execute('bbbbbbbbbbbbbbbbbbbb'); }; my $ev = $@; SKIP: { # 5 + 1 if ($ev) { diag($dbh->errstr); diag($dbh->state); if ($dbh->state eq '22001') { diag("Bug 39841 is back in some unexpected way"); diag("Please report this via rt"); #1 fail('test_1 execute'); skip 'Bug 39841 is back', 5; } else { diag("Unexpected error - please report this via rt"); fail('test_1 execute'); #1 skip 'unexpected error', 5; } } else { #1 pass('test_1 execute'); } #2 is($sth->{NUM_OF_PARAMS}, 1, 'correct number of parameters'); #diag(Dumper($sth->{ParamTypes})); #3 is($sth->{NUM_OF_FIELDS}, 2, 'fields in result-set'); my $count; eval { while($sth->fetchrow_array) { $count = 0 if !defined($count); $count++}; }; #4 ok(!$ev, "fetchrow_array"); #5 ok(defined($count), "rows returned"); SKIP: { # 1 skip "no rows returned", 1 if !defined($count); # 6 is($count, 1, 'correct number of rows returned'); }; }; }; } eval { local $dbh->{PrintWarn} = 0; local $dbh->{PrintError} = 0; $dbh->do('drop table PERL_DBD_39841a'); $dbh->do('drop table PERL_DBD_39841b'); }; } DBD-ODBC-1.61/t/rt_57957.t0000644000175000017500000000476712250310263013752 0ustar martinmartin#!/usr/bin/perl -w -I./t use Test::More; use strict; # # Test rt57957 - comments in SQL were not ignored so placeholders like :name # and ? were seen. # Also tests for placeholders in literals. # $| = 1; my $has_test_nowarnings = 1; eval "require Test::NoWarnings"; $has_test_nowarnings = undef if $@; my $tests = 8; $tests += 1 if $has_test_nowarnings; plan tests => $tests; use DBI qw(:sql_types); use_ok('ODBCTEST'); # 1 #use_ok('Data::Dumper'); # 2 my $dbh; BEGIN { plan skip_all => "DBI_DSN is undefined" if (!defined $ENV{DBI_DSN}); } END { if ($dbh) { eval { local $dbh->{PrintWarn} = 0; local $dbh->{PrintError} = 0; $dbh->do(q/drop table PERL_DBD_rt_46597/); }; $dbh->disconnect; } Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings); # 8 } $dbh = DBI->connect(); unless($dbh) { BAIL_OUT("Unable to connect to the database $DBI::errstr\nTests skipped.\n"); exit 0; } my $dbname = $dbh->get_info(17); # DBI::SQL_DBMS_NAME my $driver_name = $dbh->get_info(6); diag("\nSome of these tests may fail for your driver - please let me know if they do along with the strings $dbname/$driver_name"); # the point about the SQL in the next line is that if DBD::ODBC was # ignoring comments everything between /* and */ would be ignored but # if it is not ignored it looks like you have used the same placeholder # (:00) twice. 
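# For example, without comment stripping the timestamp in
# '/* $Date: 2010/05/01 12:00:00 */' contains ':00' twice, so a naive
# placeholder scanner would report the same named placeholder being reused.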
$dbh->{PrintError} = 0; $dbh->{RaiseError} = 0; eval { my $sth = $dbh->prepare('select 1 /* $Date: 2010/05/01 12:00:00 */'); }; ok(!$@, "Prepare with trailing comment and named placeholder") or diag($@); eval { my $sth = $dbh->prepare('/* $Date: 2010/05/01 12:00:00 */ select 1'); }; ok(!$@, "Prepare with leading comment and named placeholder") or diag($@); eval { my $sth = $dbh->prepare(<<'EOT'); select -- $Date: 2010/05/01 12:00:00 1 EOT }; ok(!$@, "Prepare with line comment named placeholder") or diag($@); eval { my $sth = $dbh->prepare('/* placeholder ? in comment */ select 1'); }; ok(!$@, "Prepare with leading comment and ? placeholder") or diag($@); eval { my $sth = $dbh->prepare(<<'EOT'); select -- placeholder ? in a comment 1 EOT }; ok(!$@, "Prepare with line comment and ? placeholder") or diag($@); eval { my $sth = $dbh->prepare(q/select '?'/); }; ok(!$@, "Prepare with ? placeholder in literal") or diag($@); eval { my $sth = $dbh->prepare(q/select ':named'/); }; ok(!$@, "Prepare with named placeholder in literal") or diag($@); DBD-ODBC-1.61/t/70execute_array_dbi.t0000755000175000017500000000353612254015065016374 0ustar martinmartin#!/usr/bin/perl -w -I./t # loads of execute_array and execute_for_fetch tests using DBI's methods use Test::More; use strict; #use Data::Dumper; $| = 1; my $has_test_nowarnings = 1; eval "require Test::NoWarnings"; $has_test_nowarnings = undef if $@; my ($dbh, $ea); use DBI qw(:sql_types); use ExecuteArray; BEGIN { plan skip_all => "DBI_DSN is undefined" if (!defined $ENV{DBI_DSN}); } END { if ($dbh && $ea) { $ea->drop_table($dbh); $dbh->disconnect(); } Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings); done_testing(); } diag("\n\nNOTE: This tests execute_array and execute_for_fetch using DBI's version and not the native DBD::ODBC execute_for_fetch. 
It should work as it is using nothing special in DBD::ODBC other than the normal methods."); $dbh = DBI->connect(); unless($dbh) { BAIL_OUT("Unable to connect to the database $DBI::errstr\nTests skipped.\n"); exit 0; } note("Using driver $dbh->{Driver}->{Name}"); $ea = ExecuteArray->new($dbh, 1); # set odbc_disable_array_operations $dbh = $ea->dbh; $ea->drop_table($dbh); ok($ea->create_table($dbh), "create test table") or exit 1; $ea->simple($dbh, {array_context => 1, raise => 1}); $ea->simple($dbh, {array_context => 0, raise => 1}); $ea->error($dbh, {array_context => 1, raise => 1}); $ea->error($dbh, {array_context => 0, raise => 1}); $ea->error($dbh, {array_context => 1, raise => 0}); $ea->error($dbh, {array_context => 0, raise => 0}); $ea->row_wise($dbh, {array_context => 1, raise => 1}); $ea->update($dbh, {array_context => 1, raise => 1}); $ea->error($dbh, {array_context => 1, raise => 1, notuplestatus => 1}); $ea->error($dbh, {array_context => 0, raise => 1, notuplestatus => 1}); $ea->error($dbh, {array_context => 1, raise => 0, notuplestatus => 1}); $ea->error($dbh, {array_context => 0, raise => 0, notuplestatus => 1}); DBD-ODBC-1.61/t/05meth.t0000755000175000017500000000665113035734125013655 0ustar martinmartin#!/usr/bin/perl -I./t ## TBd: these tests don't seem to be terribly useful #use sigtrap; use Test::More; use strict; $| = 1; my $has_test_nowarnings = 1; eval "require Test::NoWarnings"; $has_test_nowarnings = undef if $@; my $tests = 16; $tests += 1 if $has_test_nowarnings; plan tests => $tests; use_ok('DBI', qw(:sql_types)); use_ok('ODBCTEST'); use strict; my $dbh; BEGIN { if (!defined $ENV{DBI_DSN}) { plan skip_all => "DBI_DSN is undefined"; } } END { if ($dbh) { ODBCTEST::tab_delete($dbh); } Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings); } my @row; $dbh = DBI->connect(); unless($dbh) { BAIL_OUT("Unable to connect to the database $DBI::errstr\nTests skipped.\n"); exit 0; } #### testing Tim's early draft DBI methods ok(ODBCTEST::tab_create($dbh), "Create tables"); my $r1 = $DBI::rows; $dbh->{AutoCommit} = 0; my $sth; $sth = $dbh->prepare("DELETE FROM $ODBCTEST::table_name"); ok($sth, "delete prepared statement"); $sth->execute(); cmp_ok($sth->rows, '>=', 0, "Number of rows >= 0"); cmp_ok($DBI::rows, '==', $sth->rows, "Number of rows from DBI matches sth"); $sth->finish(); $dbh->rollback(); pass("finished and rolled back"); $dbh->{RaiseError} = 1; $sth = $dbh->prepare("SELECT * FROM $ODBCTEST::table_name WHERE 1 = 0"); $sth->execute(); @row = $sth->fetchrow(); if ($sth->err) { diag(" $sth->err: " . $sth->err . "\n"); diag(" $sth->errstr: " . $sth->errstr . "\n"); diag(" $dbh->state: " . $dbh->state . "\n"); } ok(!$sth->err, "no error"); $sth->finish(); my ($a, $b); $sth = $dbh->prepare("SELECT COL_A, COL_B FROM $ODBCTEST::table_name"); $sth->execute(); while (@row = $sth->fetchrow()) { print " \@row a,b:", $row[0], ",", $row[1], "\n"; } $sth->finish(); my $skip; eval {$sth->execute()}; if (my $ev = $@) { if ($ev =~ /No query has been executed/) { fail("Looks like you might be using Postgres ODBC driver which will fail this test " . 
"unless you add UseDeclareFetch=1 to your DSN"); } else { fail("reexecute on prepared statement - $ev"); } $skip = 1; } else { ok('rexecute on prepared statement'); } SKIP: { skip "reexecute failed", 2 if $skip; $sth->bind_col(1, \$a); $sth->bind_col(2, \$b); while ($sth->fetch()) { print " bind_col a,b:", $a, ",", $b, "\n"; unless (defined($a) && defined($b)) { print "not "; last; } } pass("?"); $sth->finish(); ($a, $b) = (undef, undef); $sth->execute(); $sth->bind_columns(undef, \$b, \$a); while ($sth->fetch()) { print " bind_columns a,b:", $b, ",", $a, "\n"; unless (defined($a) && defined($b)) { print "not "; last; } } pass("??"); $sth->finish(); }; # turn off error warnings. We expect one here (invalid transaction state) $dbh->{RaiseError} = 0; $dbh->{PrintWarn} = 0; $dbh->{PrintError} = 0; ok( $dbh->{$_}, $_) for 'Active'; ok( $dbh-> $_ , $_) for 'ping'; ok( $dbh-> $_ , $_) for 'disconnect'; ok(!$dbh->{$_}, $_) for 'Active'; ok(!$dbh-> $_ , $_) for 'ping';; # $dbh->disconnect(); # already disconnected exit 0; # avoid warning on one use of DBI::errstr print $DBI::errstr; # make sure there is an invalid transaction state error at the end here. # (XXX not reliable, iodbc-2.12 with "INTERSOLV dBase IV ODBC Driver" == -1) #print "# DBI::err=$DBI::err\nnot " if $DBI::err ne "25000"; #print "ok 7\n"; DBD-ODBC-1.61/t/rt_null_nvarchar.t0000755000175000017500000000654612254015325016115 0ustar martinmartin#!/usr/bin/perl -w -I./t # # test varbinary(MAX) and varchar(MAX) types in SQL Server # Mostly rt_38977 with additional: # test you can insert NULL into VARxxx(MAX) types. # use Test::More; use strict; $| = 1; my $has_test_nowarnings = 1; eval "require Test::NoWarnings"; $has_test_nowarnings = undef if $@; my $tests = 8; $tests += 1 if $has_test_nowarnings; plan tests => $tests; # can't seem to get the imports right this way use DBI qw(:sql_types); my $dbh; BEGIN { if (!defined $ENV{DBI_DSN}) { plan skip_all => "DBI_DSN is undefined"; } } END { if ($dbh) { eval { local $dbh->{PrintWarn} = 0; local $dbh->{PrintError} = 0; $dbh->do(q/drop table PERL_DBD_rt_NLVC/); }; } Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings); } $dbh = DBI->connect(); unless($dbh) { BAIL_OUT("Unable to connect to the database $DBI::errstr\nTests skipped.\n"); exit 0; } $dbh->{RaiseError} = 1; my $dbms_name = $dbh->get_info(17); ok($dbms_name, "got DBMS name: $dbms_name"); # 1 my $dbms_version = $dbh->get_info(18); ok($dbms_version, "got DBMS version: $dbms_version"); # 2 my $driver_name = $dbh->get_info(6); ok($driver_name, "got DRIVER name: $driver_name"); # 3 my $driver_version = $dbh->get_info(7); ok($driver_version, "got DRIVER version $driver_version"); # 4 my ($ev, $sth); SKIP: { skip "not SQL Server", 4 if $dbms_name !~ /Microsoft SQL Server/; skip "Easysoft OOB", 4 if $driver_name =~ /esoobclient/; my $major_version = $dbms_version; $major_version =~ s/^(\d+)\..*$/$1/; #diag("Major Version: $major_version\n"); skip "SQL Server version too old", 4 if $major_version < 9; eval { local $dbh->{PrintWarn} = 0; local $dbh->{PrintError} = 0; $dbh->do('drop table PERL_DBD_rt_NLVC'); }; eval { $dbh->do('create table PERL_DBD_rt_NLVC (a NVARCHAR(MAX) NULL)'); }; $ev = $@; ok(!$ev, 'create test table with nvarchar(max)'); # 5 SKIP: { skip "Failed to create test table", 2 if ($ev); eval { $sth = $dbh->prepare('INSERT into PERL_DBD_rt_NLVC VALUES (?)'); }; $ev = $@; ok($sth && !$@, "prepare insert"); # 6 SKIP: { skip "Failed to prepare", 2 if ($ev); my $x = 'x' x 500000; eval { $sth->execute($x); }; $ev = $@; 
ok(!$ev, "execute insert"); # 7 if ($ev) { diag("Execute for insert into varchar(max) failed with $ev"); diag(q/Some SQL Server drivers such as the native client 09.00.1399 / . q/driver fail this test with a HY104, "Invalid precision error". / . qq/You have driver $driver_name at version $driver_version. / . q/There is a free upgrade from Microsoft of the native client driver /. q/to 10.00.1600 which you will need if you intend to insert / . q/into varchar(max) columns./); } eval { $sth->execute(undef); }; ok(!$ev, 'insert NULL into VARCHAR(MAX)') || diag($ev); # 8 }; }; eval { local $dbh->{PrintWarn} = 0; local $dbh->{PrintError} = 0; $dbh->do('drop table PERL_DBD_rt_NLVC'); }; }; DBD-ODBC-1.61/t/rt_63550.t0000755000175000017500000000157112254015303013726 0ustar martinmartin#!/usr/bin/perl -w -I./t # # rt_53550 - check Statement is accessible in error handler from do method # use Test::More; use strict; use DBI qw(:sql_types); use_ok('ODBCTEST'); eval "require Test::NoWarnings"; my $has_test_nowarnings = ($@ ? undef : 1); my $dbh; BEGIN { if (!defined $ENV{DBI_DSN}) { plan skip_all => "DBI_DSN is undefined"; } } END { Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings); done_testing(); } $dbh = DBI->connect(); unless($dbh) { BAIL_OUT("Unable to connect to the database $DBI::errstr\nTests skipped.\n"); exit 0; } $dbh->{RaiseError} = 0; $dbh->{PrintError} = 0; $dbh->{ShowErrorStatement} = 0; sub _err_handler { my ($error, $h) = @_; ok(defined($h->{Statement}), 'Statement is defined'); return 0; } $dbh->{HandleError} = \&_err_handler; $dbh->do("select * from PERL_DBD_RT63550"); DBD-ODBC-1.61/t/41Unicode.t0000644000175000017500000001026012250310263014261 0ustar martinmartin#!/usr/bin/perl -w -I./t # based on *Id: 20SqlServer.t 568 2004-11-08 15:12:37Z jurl * use strict; use warnings; use UChelp; use Test::More; use DBI qw(:sql_types); my $has_test_nowarnings; $|=1; my $WAIT=0; my @data; my $tests; my $data_tests; my $other_tests; BEGIN { if ($] < 5.008001) { plan skip_all => "Old Perl lacking unicode support"; } elsif (!defined $ENV{DBI_DSN}) { plan skip_all => "DBI_DSN is undefined"; } @data=( "hello ASCII: the quick brown fox jumps over the yellow dog", "Hello Unicode: german umlauts (\x{00C4}\x{00D6}\x{00DC}\x{00E4}\x{00F6}\x{00FC}\x{00DF}) smile (\x{263A}) hebrew shalom (\x{05E9}\x{05DC}\x{05D5}\x{05DD})", ); push @data,map { "again $_" } @data; utf8::is_utf8($data[0]) and die "Perl set UTF8 flag on non-unicode string constant"; utf8::is_utf8($data[1]) or die "Perl did not set UTF8 flag on unicode string constant"; utf8::is_utf8($data[2]) and die "Perl set UTF8 flag on non-unicode string constant"; utf8::is_utf8($data[3]) or die "Perl did not set UTF8 flag on unicode string constant"; $data_tests=12*@data; $other_tests = 7; $tests = $other_tests + $data_tests; eval "require Test::NoWarnings"; if (!$@) { $has_test_nowarnings = 1; } $tests += 1 if $has_test_nowarnings; plan tests => $tests, } END { Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings); } my $dbh=DBI->connect(); ok(defined($dbh),"DBI connect"); SKIP: { if (!$dbh->{odbc_has_unicode}) { skip "Unicode-specific tests disabled - not a unicode build", $data_tests + $other_tests - 1; } my $dbname = $dbh->get_info(17); # DBI::SQL_DBMS_NAME SKIP: { my ($sth,$NVARCHAR); if ($dbname=~/Microsoft SQL Server/i) { ($NVARCHAR)=('NVARCHAR(1000)'); } elsif ($dbname=~/Oracle/i) { ($NVARCHAR)=('NVARCHAR2(1000)'); } elsif ($dbname=~/PostgreSQL/i) { ($NVARCHAR)=('VARCHAR(1000)'); } elsif ($dbname=~/ACCESS/i) { 
($NVARCHAR)=('MEMO'); } elsif ($dbname=~/DB2/i) { ($NVARCHAR)=('VARGRAPHIC(500)'); } else { skip "Tests not supported using $dbname", $data_tests + $other_tests - 1; } $dbh->{RaiseError} = 1; $dbh->{'LongTruncOk'}=1; $dbh->{'LongReadLen'}=32000; eval { local $dbh->{PrintError}=0; $dbh->do("DROP TABLE PERL_DBD_TABLE1"); }; pass("Drop old test table"); $dbh->{RaiseError} = 1; $dbh->do(<<__SQL__); CREATE TABLE PERL_DBD_TABLE1 ( i INTEGER NOT NULL PRIMARY KEY, nva $NVARCHAR, nvb $NVARCHAR, nvc $NVARCHAR ) __SQL__ pass("Create test table"); # Insert records into the database: $sth=$dbh->prepare("INSERT INTO PERL_DBD_TABLE1 (i,nva,nvb,nvc) values (?,?,?,?)"); ok(defined($sth),"prepare insert statement"); for (my $i=0; $i<@data; $i++) { my ($nva,$nvb,$nvc)=($data[$i]) x 3; $sth->bind_param (1, $i, SQL_INTEGER); pass("Bind parameter SQL_INTEGER"); $sth->bind_param (2, $nva); pass("Bind parameter default"); $sth->bind_param (3, $nvb, SQL_WVARCHAR); pass("Bind parameter SQL_WVARCHAR"); $sth->bind_param (4, $nvc, SQL_WVARCHAR); pass("Bind parameter SQL_WVARCHAR"); $sth->execute(); pass("execute()"); } $sth->finish(); # Retrieve records from the database, and see if they match original data: $sth=$dbh->prepare("SELECT i,nva,nvb,nvc FROM PERL_DBD_TABLE1"); ok(defined($sth),'prepare select statement'); $sth->execute(); pass('execute select statement'); while (my ($i,$nva,$nvb,$nvc)=$sth->fetchrow_array()) { my $info=sprintf("(index=%i, Unicode=%s)",$i,utf8::is_utf8($data[$i]) ? 'on' : 'off'); pass("fetch select statement $info"); cmp_ok(utf8::is_utf8($nva),'>=',utf8::is_utf8($data[$i]),"utf8 flag $info col1"); utf_eq_ok($nva,$data[$i],"value matches $info col1"); cmp_ok(utf8::is_utf8($nvb),'>=',utf8::is_utf8($data[$i]),"utf8 flag $info col2"); utf_eq_ok($nva,$data[$i],"value matches $info col2"); cmp_ok(utf8::is_utf8($nvc),'>=',utf8::is_utf8($data[$i]),"utf8 flag $info col3"); utf_eq_ok($nva,$data[$i],"value matches $info col3"); } $WAIT && eval { print "you may want to look at the table now, the unicode data is damaged!\nHit Enter to continue\n"; $_=; }; # eval { # local $dbh->{RaiseError} = 0; # $dbh->do("DROP TABLE PERL_DBD_TABLE1"); # }; $dbh->disconnect; pass("all done"); } }; exit 0; DBD-ODBC-1.61/t/rt_101579.t0000644000175000017500000000315212537511470014016 0ustar martinmartin#!/usr/bin/perl -w -I./t # # rt 101579 # # Between 1.43 and 1.50 DBD::ODBC changed to add check_for_unicode_param # function which changes bound types of SQL_VARCHAR etc to their unicode # equivalent if the perl scalar is unicode. Unfortunately, if the scalar was not unicode # or the described type was not VARCHAR it returned the SQLDescribeParam # described type ignoring the fact we map SQL_NUMERIC etc to SQL_VARCHAR. # The result is the first call to execute works and subsequent calls often return # string data, right truncated for numeric parameters. # use Test::More; use strict; use DBI; use_ok('ODBCTEST'); eval "require Test::NoWarnings"; my $has_test_nowarnings = ($@ ? 
undef : 1); my $dbh; BEGIN { if (!defined $ENV{DBI_DSN}) { plan skip_all => "DBI_DSN is undefined"; } } END { if ($dbh) { local $dbh->{PrintWarn} = 0; local $dbh->{PrintError} = 0; $dbh->do(q/drop table PERL_DBD_RT_101579/); $dbh->disconnect; } Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings); done_testing(); } $dbh = DBI->connect(); unless($dbh) { BAIL_OUT("Unable to connect to the database $DBI::errstr\nTests skipped.\n"); exit 0; } $dbh->{RaiseError} = 0; $dbh->do(q/create table PERL_DBD_RT_101579 (a varchar(500), val numeric(9,2))/) or BAIL_OUT("Failed to create test table " . $dbh->errstr); my @vals = (8295.60, 181161.80, 6514.15); my $sth = $dbh->prepare(q/insert into PERL_DBD_RT_101579 (a, val) values(?,?)/); foreach my $val (@vals) { eval { $sth->execute('fred', $val); }; my $ev = $@; ok(!$ev, "Inserted $val"); } DBD-ODBC-1.61/t/ExecuteArray.pm0000644000175000017500000004333012254015207015310 0ustar martinmartinpackage ExecuteArray; use Test::More; use Data::Dumper; use DBI; our $VERSION = '0.01'; my $table = 'PERL_DBD_execute_array'; my $table2 = 'PERL_DBD_execute_array2'; my @p1 = (1,2,3,4,5); my @p2 = qw(one two three four five); my $fetch_row = 0; my @captured_error; # values captured in error handler sub error_handler { @captured_error = @_; note("***** error handler called *****"); 0; # pass errors on } sub new { my ($class, $dbh, $dbi_version) = @_; my $self = {}; $dbh = setup($dbh, $dbi_version); $self->{_dbh} = $dbh; # find out how the driver supports row counts and parameter status $self->{_param_array_row_counts} = $dbh->get_info(153); # a return of 1 is SQL_PARC_BATCH which means: # Individual row counts are available for each set of parameters. This is # conceptually equivalent to the driver generating a batch of SQL # statements, one for each parameter set in the array. Extended error # information can be retrieved by using the SQL_PARAM_STATUS_PTR # descriptor field. # a return of 2 is SQL_PARC_NO_BATCH which means: # There is only one row count available, which is the cumulative row # count resulting from the execution of the statement for the entire # array of parameters. This is conceptually equivalent to treating # the statement together with the complete parameter array as one # atomic unit. Errors are handled the same as if one statement # were executed. 
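    # In short: check_tuple_status() below only insists that a failed row's
    # ArrayTupleStatus entry is a three-element error arrayref when the driver
    # does NOT report SQL_PARC_NO_BATCH (2), since only batch-style drivers
    # (SQL_PARC_BATCH) are expected to supply per-row error details.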
return bless ($self, $class); } sub dbh { my $self = shift; return $self->{_dbh}; } sub setup { my ($dbh, $dbi_version) = @_; $dbh = enable_mars($dbh, $native); $dbh->{HandleError} = \&error_handler; if ($dbi_version) { $dbh->{odbc_disable_array_operations} = 1; } #$dbh->{ora_verbose} = 5; $dbh->{RaiseError} = 1; $dbh->{PrintError} = 0; $dbh->{ChopBlanks} = 1; $dbh->{AutoCommit} = 1; return $dbh; } sub create_table { my ($self, $dbh) = @_; eval { $dbh->do(qq/create table $table (a integer not null primary key, b char(20))/); }; if ($@) { diag("Failed to create test table $table - $@"); return 0; } eval { $dbh->do(qq/create table $table2 (a integer not null primary key, b char(20))/); }; if ($@) { diag("Failed to create test table $table2 - $@"); return 0; } my $sth = $dbh->prepare(qq/insert into $table2 values(?,?)/); for (my $row = 0; $row < @p1; $row++) { $sth->execute($p1[$row], $p2[$row]); } 1; } sub drop_table { my ($self, $dbh) = @_; eval { local $dbh->{PrintError} = 0; local $dbh->{PrintWarn} = 0; $dbh->do(qq/drop table $table/); $dbh->do(qq/drop table $table2/); }; note("Table dropped"); } # clear the named table of rows sub clear_table { $_[0]->do(qq/delete from $_[1]/); } # check $table contains the data in $c1, $c2 which are arrayrefs of values sub check_data { my ($dbh, $c1, $c2) = @_; my $data = $dbh->selectall_arrayref(qq/select * from $table order by a/); my $row = 0; foreach (@$data) { is($_->[0], $c1->[$row], "row $row p1 data"); is($_->[1], $c2->[$row], "row $row p2 data"); $row++; } } sub check_tuple_status { my ($self, $tsts, $expected) = @_; note(Data::Dumper->Dump([$tsts], [qw(ArrayTupleStatus)])); BAIL_OUT('expected data must be specified') if (!$expected || (ref($expected) ne 'ARRAY')); is(ref($tsts), 'ARRAY', 'tuple status is an array') or return; if (!is(scalar(@$tsts), scalar(@$expected), 'status arrays same size')) { diag(Dumper($tsts)); diag(Dumper($expected)); return; } my $row = 0; foreach my $s (@$expected) { if (ref($s)) { unless ($self->{_param_array_row_counts} == 2) { is(ref($tsts->[$row]), 'ARRAY', 'array in array tuple status'); is(scalar(@{$tsts->[$row]}), 3, '3 elements in array tuple status error'); } } else { if ($tsts->[$row] == -1) { pass("row $row tuple status unknown"); } else { is($tsts->[$row], $s, "row $row tuple status"); } } $row++; } return; } # insert might return 'mas' which means the caller said the test # required Multiple Active Statements and the driver appeared to not # support MAS. # # ref is a hash ref: # error (0|1) whether we expect an error # raise (0|1) means set RaiseError to this # commit (0|1) do the inserts in a txn # tuple arrayref of what we expect in the tuple status # e.g., [1,1,1,1,[]] # where the empty [] signifies we expect an error for this row # where 1 signifies we the expect row count for this row # affected - the total number of rows affected for insert/update # sub insert { my ($self, $dbh, $sth, $ref) = @_; die "need hashref arg" if (!$ref || (ref($ref) ne 'HASH')); note("insert " . join(", ", map {"$_ = ". 
DBI::neat($ref->{$_})} keys %$ref )); # DBD::Oracle supports MAS don't compensate for it not if ($ref->{requires_mas} && $dbh->{Driver}->{Name} eq 'Oracle') { delete $ref->{requires_mas}; } @captured_error = (); if ($ref->{raise}) { $sth->{RaiseError} = 1; } else { $sth->{RaiseError} = 0; } my (@tuple_status, $sts, $total_affected); my $tuple_status_arg = {}; $tuple_status_arg->{ArrayTupleStatus} = \@tuple_status unless $ref->{notuplestatus}; $sts = 999999; # to ensure it is overwritten $total_affected = 999998; if ($ref->{array_context}) { eval { if ($ref->{params}) { ($sts, $total_affected) = $sth->execute_array($tuple_status_arg, @{$ref->{params}}); } elsif ($ref->{fetch}) { ($sts, $total_affected) = $sth->execute_array( {%{$tuple_status_arg}, ArrayTupleFetch => $ref->{fetch}}); } else { ($sts, $total_affected) = $sth->execute_array($tuple_status_arg); } }; } else { eval { if ($ref->{params}) { $sts = $sth->execute_array($tuple_status_arg, @{$ref->{params}}); } else { $sts = $sth->execute_array($tuple_status_arg); } }; } my $ev = $@; if ($ref->{error} && $ref->{raise}) { ok($ev, 'error in execute_array eval'); } elsif ($ref->{requires_mas} && $ev) { diag("\nThis test died with $ev"); diag("It requires multiple active statement support in the driver and I cannot easily determine if your driver supports MAS. Ignoring the rest of this test."); foreach (@tuple_status) { if (ref($_)) { diag(join(",", @$_)); } } return 'mas'; } elsif ($ref->{raise} && $ev) { BAIL_OUT("Totally unexpected error - $ev"); } else { ok(!$@, 'no error in execute_array eval') or note($@); } $dbh->commit if $ref->{commit}; if (!$ref->{raise} || ($ref->{error} == 0)) { if (exists($ref->{sts})) { is($sts, $ref->{sts}, "execute_array returned " . DBI::neat($sts) . " rows executed"); } if (exists($ref->{affected}) && $ref->{array_context}) { is($total_affected, $ref->{affected}, "total affected " . DBI::neat($total_affected)) } } if ($ref->{raise}) { if ($ref->{error}) { ok(scalar(@captured_error) > 0, "error captured"); } else { is(scalar(@captured_error), 0, "no error captured"); } } if ($ref->{sts}) { is(scalar(@tuple_status), (($ref->{sts} eq '0E0') ? 0 : $ref->{sts}), "$ref->{sts} rows in tuple_status"); } if ($ref->{tuple} && !exists($ref->{notuplestatus})) { $self->check_tuple_status(\@tuple_status, $ref->{tuple}); } return; } # simple test on ensure execute_array with no errors: # o checks returned status and affected is correct # o checks ArrayTupleStatus is correct # o checks no error is raised # o checks rows are inserted # o run twice with AutoCommit on/off # o checks if less values are specified for one parameter the right number # of rows are still inserted and NULLs are placed in the missing rows # checks binding via bind_param_array and adding params to execute_array # checks binding no parameters at all sub simple { my ($self, $dbh, $ref) = @_; note('simple tests ' . 
join(", ", map {"$_ = $ref->{$_}"} keys %$ref )); note(" all param arrays the same size"); foreach my $commit (1,0) { note(" Autocommit: $commit"); clear_table($dbh, $table); $dbh->begin_work if !$commit; my $sth = $dbh->prepare(qq/insert into $table values(?,?)/); $sth->bind_param_array(1, \@p1); $sth->bind_param_array(2, \@p2); $self->insert($dbh, $sth, { commit => !$commit, error => 0, sts => 5, affected => 5, tuple => [1, 1, 1, 1, 1], %$ref}); check_data($dbh, \@p1, \@p2); } note " Not all param arrays the same size"; clear_table($dbh, $table); my $sth = $dbh->prepare(qq/insert into $table values(?,?)/); $sth->bind_param_array(1, \@p1); $sth->bind_param_array(2, [qw(one)]); $self->insert($dbh, $sth, {commit => 0, error => 0, raise => 1, sts => 5, affected => 5, tuple => [1, 1, 1, 1, 1], %$ref}); check_data($dbh, \@p1, ['one', undef, undef, undef, undef]); note " Not all param arrays the same size with bind on execute_array"; clear_table($dbh, $table); $sth = $dbh->prepare(qq/insert into $table values(?,?)/); $self->insert($dbh, $sth, {commit => 0, error => 0, raise => 1, sts => 5, affected => 5, tuple => [1, 1, 1, 1, 1], %$ref, params => [\@p1, [qw(one)]]}); check_data($dbh, \@p1, ['one', undef, undef, undef, undef]); note " no parameters"; clear_table($dbh, $table); $sth = $dbh->prepare(qq/insert into $table values(?,?)/); $self->insert($dbh, $sth, {commit => 0, error => 0, raise => 1, sts => '0E0', affected => 0, tuple => [], %$ref, params => [[], []]}); check_data($dbh, \@p1, ['one', undef, undef, undef, undef]); } # error test to ensure correct behavior for execute_array when it errors: # o execute_array of 5 inserts with last one failing # o check it raises an error # o check caught error is passed on from handler for eval # o check returned status and affected rows # o check ArrayTupleStatus # o check valid inserts are inserted # o execute_array of 5 inserts with 2nd last one failing # o check it raises an error # o check caught error is passed on from handler for eval # o check returned status and affected rows # o check ArrayTupleStatus # o check valid inserts are inserted sub error { my ($self, $dbh, $ref) = @_; die "need hashref arg" if (!$ref || (ref($ref) ne 'HASH')); note('error tests ' . 
join(", ", map {"$_ = $ref->{$_}"} keys %$ref )); { note("Last row in error"); clear_table($dbh, $table); my $sth = $dbh->prepare(qq/insert into $table values(?,?)/); my @pe1 = @p1; $pe1[-1] = 1; $sth->bind_param_array(1, \@pe1); $sth->bind_param_array(2, \@p2); $self->insert($dbh, $sth, {commit => 0, error => 1, sts => undef, affected => undef, tuple => [1, 1, 1, 1, []], %$ref}); check_data($dbh, [@pe1[0..4]], [@p2[0..4]]); } { note("2nd last row in error"); clear_table($dbh, $table); my $sth = $dbh->prepare(qq/insert into $table values(?,?)/); my @pe1 = @p1; $pe1[-2] = 1; $sth->bind_param_array(1, \@pe1); $sth->bind_param_array(2, \@p2); $self->insert($dbh, $sth, {commit => 0, error => 1, sts => undef, affected => undef, tuple => [1, 1, 1, [], 1], %$ref}); check_data($dbh, [@pe1[0..2],$pe1[4]], [@p2[0..2], $p2[4]]); } } sub fetch_sub { note("fetch_sub $fetch_row"); if ($fetch_row == @p1) { note('returning undef'); $fetch_row = 0; return; } return [$p1[$fetch_row], $p2[$fetch_row++]]; } # test insertion via execute_array and ArrayTupleFetch sub row_wise { my ($self, $dbh, $ref) = @_; note("row_size via execute_for_fetch"); # Populate the first table via a ArrayTupleFetch which points to a sub # returning rows $fetch_row = 0; # reset fetch_sub to start with first row clear_table($dbh, $table); my $sth = $dbh->prepare(qq/insert into $table values(?,?)/); $self->insert($dbh, $sth, {commit => 0, error => 0, sts => 5, affected => 5, tuple => [1, 1, 1, 1, 1], %$ref, fetch => \&fetch_sub}); # NOTE: The following test requires Multiple Active Statements. Although # I can find ODBC drivers which do this it is not easy (if at all possible) # to know if an ODBC driver can handle MAS or not. If it errors the # driver probably does not have MAS so the error is ignored and a # diagnostic is output. Exceptions are DBD::Oracle which definitely does # support MAS. # The data pushed into the first table is retrieved via ArrayTupleFetch # from the second table by passing an executed select statement handle into # execute_array. note("row_size via select"); clear_table($dbh, $table); $sth = $dbh->prepare(qq/insert into $table values(?,?)/); my $sth2 = $dbh->prepare(qq/select * from $table2/); # some drivers issue warnings when mas fails and this causes # Test::NoWarnings to output something when we already found # the test failed and captured it. # e.g., some ODBC drivers cannot do MAS and this test is then expected to # fail but we ignore the failure. 
Unfortunately in failing DBD::ODBC will # issue a warning in addition to the fail $sth->{Warn} = 0; $sth->{Warn} = 0; ok($sth2->execute, 'execute on second table') or diag($sth2->errstr); ok($sth2->{Executed}, 'second statement is in executed state'); my $res = $self->insert($dbh, $sth, {commit => 0, error => 0, sts => 5, affected => 5, tuple => [1, 1, 1, 1, 1], %$ref, fetch => $sth2, requires_mas => 1}); return if $res && $res eq 'mas'; # aborted , does not seem to support MAS check_data($dbh, \@p1, \@p2); } # test updates # updates are special as you can update more rows than there are parameter rows sub update { my ($self, $dbh, $ref) = @_; note("update test"); # populate the first table with the default 5 rows using a ArrayTupleFetch $fetch_row = 0; clear_table($dbh, $table); my $sth = $dbh->prepare(qq/insert into $table values(?,?)/); $self->insert($dbh, $sth, {commit => 0, error => 0, sts => 5, affected => 5, tuple => [1, 1, 1, 1, 1], %$ref, fetch => \&fetch_sub}); check_data($dbh, \@p1, \@p2); # update all rows b column to 'fred' checking rows affected is 5 $sth = $dbh->prepare(qq/update $table set b = ? where a = ?/); # NOTE, this also checks you can pass a scalar to bind_param_array $sth->bind_param_array(1, 'fred'); $sth->bind_param_array(2, \@p1); $self->insert($dbh, $sth, {commit => 0, error => 0, sts => 5, affected => 5, tuple => [1, 1, 1, 1, 1], %$ref}); check_data($dbh, \@p1, [qw(fred fred fred fred fred)]); # update 4 rows column b to 'dave' checking rows affected is 4 $sth = $dbh->prepare(qq/update $table set b = ? where a = ?/); # NOTE, this also checks you can pass a scalar to bind_param_array $sth->bind_param_array(1, 'dave'); my @pe1 = @p1; $pe1[-1] = 10; # non-existant row $sth->bind_param_array(2, \@pe1); $self->insert($dbh, $sth, {commit => 0, error => 0, sts => 5, affected => 4, tuple => [1, 1, 1, 1, '0E0'], %$ref}); check_data($dbh, \@p1, [qw(dave dave dave dave fred)]); # now change all rows b column to 'pete' - this will change all 5 # rows even though we have 2 rows of parameters so we can see if # the rows affected is > parameter rows $sth = $dbh->prepare(qq/update $table set b = ? where b like ?/); # NOTE, this also checks you can pass a scalar to bind_param_array $sth->bind_param_array(1, 'pete'); $sth->bind_param_array(2, ['dave%', 'fred%']); $self->insert($dbh, $sth, {commit => 0, error => 0, sts => 2, affected => 5, tuple => [4, 1], %$ref}); check_data($dbh, \@p1, [qw(pete pete pete pete pete)]); } sub enable_mars { my $dbh = shift; # this test uses multiple active statements # if we recognise the driver and it supports MAS enable it my $driver_name = $dbh->get_info(6) || ''; if (($driver_name eq 'libessqlsrv.so') || ($driver_name =~ /libsqlncli/)) { my $dsn = $ENV{DBI_DSN}; if ($dsn !~ /^dbi:ODBC:DSN=/ && $dsn !~ /DRIVER=/i) { my @a = split(q/:/, $ENV{DBI_DSN}); $dsn = join(q/:/, @a[0..($#a - 1)]) . ":DSN=" . 
$a[-1]; } $dsn .= ";MARS_Connection=yes"; $dbh->disconnect; $dbh = DBI->connect($dsn, $ENV{DBI_USER}, $ENV{DBI_PASS}); } return $dbh; } 1; DBD-ODBC-1.61/t/03dbatt.t0000755000175000017500000001563213035731102014003 0ustar martinmartin#!perl -w -I./t use Test::More; use strict; use Data::Dumper; my $has_test_nowarnings = 1; eval "require Test::NoWarnings"; $has_test_nowarnings = undef if $@; my $tests = 26 + 4; $tests += 1 if $has_test_nowarnings; plan tests => $tests; $|=1; use_ok('DBI', qw(:sql_types)); use_ok('ODBCTEST'); my $dbh; BEGIN { if (!defined $ENV{DBI_DSN}) { plan skip_all => "DBI_DSN is undefined"; } } END { if ($dbh) { ODBCTEST::tab_delete($dbh); } Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings); } my @row; $dbh = DBI->connect(); unless($dbh) { BAIL_OUT("Unable to connect to the database $DBI::errstr\nTests skipped.\n"); exit 0; } $dbh->{LongReadLen} = 1000; is($dbh->{LongReadLen}, 1000, "Set Long Read Len"); my $dbname = $dbh->{odbc_SQL_DBMS_NAME}; ok(ODBCTEST::tab_create($dbh), "Create tables"); #### testing set/get of connection attributes $dbh->{RaiseError} = 0; $dbh->{AutoCommit} = 1; ok($dbh->{AutoCommit}, "AutoCommit set on dbh"); my $rc = commitTest($dbh); diag(" Strange: " . $dbh->errstr . "\n") if ($rc < -1); SKIP: { skip "skipped due to lack of transaction support", 3 if ($rc == -1); is($rc, 1, "commitTest with AutoCommit"); $dbh->{AutoCommit} = 0; ok(!$dbh->{AutoCommit}, "AutoCommit turned off"); $rc = commitTest($dbh); diag(" Strange: " . $dbh->errstr . "\n") if ($rc < -1); is($rc, 0, "commitTest with AutoCommit off"); }; $dbh->{AutoCommit} = 1; ok($dbh->{AutoCommit}, "Ensure autocommit back on"); # ------------------------------------------------------------ my $rows = 0; # Check for tables function working. my $sth; my @table_info_cols = ( 'TABLE_CAT', 'TABLE_SCHEM', 'TABLE_NAME', 'TABLE_TYPE', 'REMARKS', ); my @odbc2_table_info_cols = ( 'TABLE_QUALIFIER', 'TABLE_OWNER', 'TABLE_NAME', 'TABLE_TYPE', 'REMARKS'); SKIP: { $sth = $dbh->table_info(); skip "table_info returned undef sth", 7 unless $sth; my $cols = $sth->{NAME}; isa_ok($cols, 'ARRAY', "sth {NAME} returns ref to array"); diag("\nN.B. Some drivers (postgres/cache) may return ODBC 2.0 column names for the SQLTables result-set e.g. TABLE_QUALIFIER instead of TABLE_CAT"); for (my $i = 0; $i < @$cols; $i++) { # print ${$cols}[$i], ": ", $sth->func($i+1, 3, ColAttributes), # "\n"; ok(($cols->[$i] eq $table_info_cols[$i]) || ($cols->[$i] eq $odbc2_table_info_cols[$i]), "Column test for table_info $i") or diag("${$cols}[$i] ne $table_info_cols[$i]"); if (($cols->[$i] ne $table_info_cols[$i]) && ($cols->[$i] eq $odbc2_table_info_cols[$i])) { diag("Your driver is returning ODBC 2.0 column names for the SQLTables result-set"); diag(" $odbc2_table_info_cols[$i] instead of $table_info_cols[$i]"); } } while (@row = $sth->fetchrow()) { $rows++; } cmp_ok($rows, '>', 0, "must be some tables out there?"); $sth->finish(); }; $rows = 0; $dbh->{PrintError} = 0; my @tables = $dbh->tables; cmp_ok(@tables, '>', 0, "tables returns array"); $rows = 0; if ($sth = $dbh->column_info(undef, undef, $ODBCTEST::table_name, undef)) { my $fetched = $sth->fetchall_arrayref; cmp_ok(scalar(@$fetched), '>', 0, "column info returns a row for test table " . 
$ODBCTEST::table_name) or diag(Dumper($fetched)); } $rows = 0; if ($sth = $dbh->primary_key_info(undef, undef, $ODBCTEST::table_name, undef)) { while (@row = $sth->fetchrow()) { $rows++; } $sth->finish(); } SKIP: { skip "Primary Key Known to fail using MS Access through 2000", 1 if ($dbname =~ /Access/i); cmp_ok($rows, '>', 0, "primary key count"); }; # test $sth->{NAME} when using non-select statements $sth = $dbh->prepare("update $ODBCTEST::table_name set COL_A = 100 WHERE COL_A = 100"); ok($sth, "prepare update statement returns valid sth "); is(@{$sth->{NAME}}, 0, "update statement has 0 columns returned"); $sth->execute; SKIP: { skip 'Testing $sth->{NAME} after successful execute on update statement known to fail in Postgres', 1 if ($dbname =~ /PostgreSQL/i); is(@{$sth->{NAME}}, 0, "update statement has 0 columns returned 2"); }; is($dbh->{odbc_query_timeout}, 0, 'verify default dbh odbc_query_timeout = 0'); my $sth_timeout = $dbh->prepare("select COL_A from $ODBCTEST::table_name"); is($sth_timeout->{odbc_query_timeout}, 0, 'verify default sth odbc_query_timeout = 0'); $sth_timeout = undef; $dbh->{odbc_query_timeout} = 30; is($dbh->{odbc_query_timeout}, 30, "Verify odbc_query_timeout set ok"); $sth_timeout = $dbh->prepare("select COL_A from $ODBCTEST::table_name"); is($sth_timeout->{odbc_query_timeout}, 30, "verify dbh setting for query_timeout passed to sth"); $sth_timeout->{odbc_query_timeout} = 1; is($sth_timeout->{odbc_query_timeout}, 1, "verify sth query_timeout can be overridden"); # odbc_column_display_size is($dbh->{odbc_column_display_size}, 2001, 'verify default for odbc_column_display_size'); ok($dbh->{odbc_column_display_size} = 3000, 'set odbc_column_display_size'); is($dbh->{odbc_column_display_size}, 3000, 'verify changed odbc_column_display_size'); $dbh->disconnect; exit 0; # avoid annoying warning print $DBI::errstr; # print STDERR $dbh->{odbc_SQL_DRIVER_ODBC_VER}, "\n"; # ------------------------------------------------------------ # returns true when a row remains inserted after a rollback. # this means that autocommit is ON. # ------------------------------------------------------------ sub commitTest { my $dbh = shift; my $rc = -2; my $sth; # since this test deletes the record, we should do it regardless # of whether or not it the db supports transactions. $dbh->do("DELETE FROM $ODBCTEST::table_name WHERE COL_A = 100") or return undef; { # suppress the "commit ineffective" warning local($SIG{__WARN__}) = sub { }; $dbh->commit(); } my $supported = $dbh->get_info(46); # SQL_TXN_CAPABLE # print "Transactions supported: $supported\n"; if (!$supported) { return -1; } my $row = ODBCTEST::get_type_for_column($dbh, 'COL_D'); my $dateval; if (ODBCTEST::isDateType($row->{DATA_TYPE})) { $dateval = "{d '1997-01-01'}"; } else { $dateval = "{ts '1997-01-01 00:00:00'}"; } $dbh->do("insert into $ODBCTEST::table_name values(100, 'x', 'y', $dateval)"); { # suppress the "rollback ineffective" warning local($SIG{__WARN__}) = sub { }; $dbh->rollback(); } $sth = $dbh->prepare("SELECT COL_A FROM $ODBCTEST::table_name WHERE COL_A = 100"); $sth->execute(); if (@row = $sth->fetchrow()) { $rc = 1; } else { $rc = 0; } # in case not all rows have been returned..there shouldn't be more than one. 
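    # (Summary of the value handed back below: 1 = the inserted row survived
    #  the rollback, i.e. AutoCommit is effectively on; 0 = the row was rolled
    #  back; -1 = the driver reports no transaction support, SQL_TXN_CAPABLE 0.)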
$sth->finish(); $rc; } # ------------------------------------------------------------ DBD-ODBC-1.61/t/10handler.t0000755000175000017500000000513712254015023014316 0ustar martinmartin#!/usr/bin/perl -w -I./t use Test::More; use strict; $| = 1; my $has_test_nowarnings = 1; eval "require Test::NoWarnings"; $has_test_nowarnings = undef if $@; my $tests = 10; $tests += 1 if $has_test_nowarnings; plan tests => $tests; use_ok('ODBCTEST'); BEGIN { if (!defined $ENV{DBI_DSN}) { plan skip_all => "DBI_DSN is undefined"; } } END { Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings); } my $dbh = DBI->connect(); unless($dbh) { BAIL_OUT("Unable to connect to the database $DBI::errstr\nTests skipped.\n"); exit 0; } $dbh->{PrintError} = 0; $dbh->{RaiseError} = 1; # # check error handler is called, the right args are passed and the error # is propagated if the handler returns true # my ($errmsg, $errstate, $errnative, $handler_called); my $handler_return = 1; $handler_called = 0; sub err_handler { ($errstate, $errmsg, $errnative) = @_; $handler_called++; #diag "===> state: $errstate\n"; #diag "===> msg: $errmsg\n"; #diag "===> nativeerr: $errnative\n"; return $handler_return; } $dbh->{odbc_err_handler} = \&err_handler; my $evalret = eval { # this sql is supposed to be invalid my $sth = $dbh->prepare('select * from'); $sth->execute; return 99; }; my $eval = $@; #diag "eval returned " . ($evalret ? $evalret : "undef") . "\n"; #diag '$@: ' . ($eval ? $eval : "undef") . "\n"; ok($handler_called >= 1, 'Error handler called'); ok($errstate, 'Error handler called - state seen'); ok($errmsg, 'Error handler called - message seen'); ok(defined($errnative), 'Error handler called - native seen'); ok(!defined($evalret), 'Error handler called - error passed on'); ok($eval, 'Error handler called - error propagated'); # # check we can reset the error handler (bug in 1.14 prevented this) # ($errmsg, $errstate, $errnative, $handler_called) = (undef, undef, undef, 0); $dbh->{odbc_err_handler} = undef; $evalret = eval { # this sql is supposed to be invalid my $sth = $dbh->prepare('select * from'); $sth->execute; return 99; }; is($handler_called, 0, 'Handler cancelled'); # # check we can filter error messages in the handler by returning 0 from # the handler # ($errmsg, $errstate, $errnative, $handler_called) = (undef, undef, undef, 0); $dbh->{odbc_err_handler} = \&err_handler; $handler_return = 0; $evalret = eval { # this sql is supposed to be invalid my $sth = $dbh->prepare('select * from'); $sth->execute if $sth; return 99; }; $eval = $@; ok(!$eval, 'Handler filtered all messages'); is($evalret, 99, 'eval complete'); $dbh->disconnect; exit 0; # get rid of use once warnings print $DBI::errstr; DBD-ODBC-1.61/t/80_odbc_diags.t0000755000175000017500000000574612456455134015152 0ustar martinmartin#!/usr/bin/perl -w -I./t # # Test the experimental odbc_getdiagrec and odbc_getdiagfield # use strict; use warnings; use DBI; use Data::Dumper; use Test::More; use DBD::ODBC qw(:diags); my $has_test_nowarnings = 1; eval "require Test::NoWarnings"; $has_test_nowarnings = undef if $@; BEGIN { if (!defined $ENV{DBI_DSN}) { plan skip_all => "DBI_DSN is undefined"; } } # header fields: #define SQL_DIAG_CURSOR_ROW_COUNT (-1249) #define SQL_DIAG_DYNAMIC_FUNCTION 7 #define SQL_DIAG_DYNAMIC_FUNCTION_CODE 12 #define SQL_DIAG_NUMBER 2 #define SQL_DIAG_RETURNCODE 1 #define SQL_DIAG_ROW_COUNT 3 my @hdr_fields = (SQL_DIAG_CURSOR_ROW_COUNT, SQL_DIAG_DYNAMIC_FUNCTION, SQL_DIAG_DYNAMIC_FUNCTION_CODE, SQL_DIAG_NUMBER, SQL_DIAG_RETURNCODE, 
SQL_DIAG_ROW_COUNT); # record fields: #define SQL_DIAG_CLASS_ORIGIN 8 #define SQL_DIAG_COLUMN_NUMBER (-1247) #define SQL_DIAG_CONNECTION_NAME 10 #define SQL_DIAG_MESSAGE_TEXT 6 #define SQL_DIAG_NATIVE 5 #define SQL_DIAG_ROW_NUMBER (-1248) #define SQL_DIAG_SERVER_NAME 11 #define SQL_DIAG_SQLSTATE 4 #define SQL_DIAG_SUBCLASS_ORIGIN 9 my @record_fields = (SQL_DIAG_CLASS_ORIGIN, SQL_DIAG_COLUMN_NUMBER, SQL_DIAG_CONNECTION_NAME, SQL_DIAG_MESSAGE_TEXT, SQL_DIAG_NATIVE, SQL_DIAG_ROW_NUMBER, SQL_DIAG_SERVER_NAME, SQL_DIAG_SQLSTATE, SQL_DIAG_SUBCLASS_ORIGIN); sub get_fields { my ($h, $record) = @_; foreach (@hdr_fields, @record_fields) { eval { my $x = $h->odbc_getdiagfield($record, $_); note("$_ = " . ($x ? $x : 'undef') . "\n"); }; if ($@) { note("diag field $_ errored\n"); } } } my $h = DBI->connect(); unless($h) { BAIL_OUT("Unable to connect to the database ($DBI::errstr)\nTests skipped.\n"); exit 0; } my $dbname = $h->get_info(17); # DBI::SQL_DBMS_NAME if ($dbname =~ /sqlite/i) { plan skip_all => "sqlite core dumps with this test - I can't find anywhere to report it"; } $h->{RaiseError} = 1; $h->{PrintError} = 0; my ($s, @diags); @diags = $h->odbc_getdiagrec(1); is(scalar(@diags), 0, 'no dbh diags after successful connect') or explain(@diags); my $ok = eval { $h->get_info(9999); # should fail as there is no 9999 info value 1; }; ok(!$ok, "SQLGetInfo fails"); @diags = $h->odbc_getdiagrec(1); is(scalar(@diags), 3, ' and 3 diag fields returned'); note(Data::Dumper->Dump([\@diags], [qw(diags)])); get_fields($h, 1); @diags = $h->odbc_getdiagrec(2); is(scalar(@diags), 0, ' and no second record diags'); $ok = eval { # some drivers fail on the prepare - some don't fail until execute $s = $h->prepare(q/select * from table_does_not_exist/); $s->execute; 1; }; ok(!$ok, "select on non-existant table fails"); if ($s) { @diags = $s->odbc_getdiagrec(1); is(scalar(@diags), 3, ' and 3 diag fields returned'); note(Data::Dumper->Dump([\@diags], [qw(diags)])); get_fields($s, 1); } Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings); done_testing(); DBD-ODBC-1.61/t/12blob.t0000755000175000017500000000734713614543165013644 0ustar martinmartin#!/usr/bin/perl -w -I./t # # blob tests # currently tests you can insert a clob with various odbc_putdata_start settings # use Test::More; use strict; $| = 1; my $has_test_nowarnings = 1; eval "require Test::NoWarnings"; $has_test_nowarnings = undef if $@; my $tests = 24; $tests += 1 if $has_test_nowarnings; plan tests => $tests; my $dbh; # can't seem to get the imports right this way use DBI qw(:sql_types); use_ok('ODBCTEST'); sub tidyup { if ($dbh) { #diag "Tidying up\n"; eval { local $dbh->{PrintWarn} = 0; local $dbh->{PrintError} = 0; $dbh->do(q/drop table DBD_ODBC_drop_me/); }; } } BEGIN { if (!defined $ENV{DBI_DSN}) { plan skip_all => "DBI_DSN is undefined"; } } END { tidyup(); Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings); } my $ev; $dbh = DBI->connect(); unless($dbh) { BAIL_OUT("Unable to connect to the database $DBI::errstr\nTests skipped.\n"); exit 0; } tidyup(); my $putdata_start = $dbh->{odbc_putdata_start}; is($putdata_start, 32768, 'default putdata_start'); my $type_info_all = $dbh->type_info_all(); ok($type_info_all, "type_info_all") or BAIL_OUT("type_info_all failed"); my $map = shift @{$type_info_all}; my ($type_name, $type); while (my $row = shift @{$type_info_all}) { #diag("$row->[$map->{TYPE_NAME}],$row->[$map->{DATA_TYPE}], $row->[$map->{COLUMN_SIZE}]"); next if (($row->[$map->{DATA_TYPE}] != SQL_WLONGVARCHAR) && 
($row->[$map->{DATA_TYPE}] != SQL_LONGVARCHAR)); if ($row->[$map->{COLUMN_SIZE}] > 60000) { #diag("$row->[$map->{TYPE_NAME}] $row->[$map->{DATA_TYPE}] $row->[$map->{COLUMN_SIZE}]"); ($type_name, $type) = ($row->[$map->{TYPE_NAME}], $row->[$map->{DATA_TYPE}]); last; } } SKIP: { skip "ODBC Driver/Database has not got a big enough type", 21 if (!$type_name); #diag("Using type $type_name"); eval { $dbh->do(qq/create table DBD_ODBC_drop_me(a $type_name)/); }; $ev = $@; diag($ev) if $ev; ok(!$ev, "table DBD_ODBC_drop_me created"); SKIP: { skip "Cannot create test table", 17 if $ev; my $bigval = "x" x 30000; test($dbh, $bigval); test($dbh, $bigval, 500); $bigval = 'x' x 60000; test($dbh, $bigval, 60001); }; }; sub test { my ($dbh, $val, $putdata_start) = @_; my $rc; if ($putdata_start) { $dbh->{odbc_putdata_start} = $putdata_start; my $pds = $dbh->{odbc_putdata_start}; is($pds, $putdata_start, "retrieved putdata_start = set value"); } my $sth = $dbh->prepare(q/insert into DBD_ODBC_drop_me values(?)/); ok($sth, "prepare for insert"); SKIP: { skip "prepare failed", 3 unless $sth; $rc = $sth->execute($val); ok($rc, "insert clob"); SKIP: { skip "insert failed - skipping the retrieval test", 2 unless $rc; test_value($dbh, $val); }; }; $sth = undef; eval {$dbh->do(q/delete from DBD_ODBC_drop_me/); }; $ev = $@; diag($ev) if $ev; ok(!$ev, 'delete records from test table'); return; } sub test_value { my ($dbh, $value) = @_; local $dbh->{RaiseError} = 1; my $max = 60001; $max = 120001 if ($type == SQL_WLONGVARCHAR || $dbh->{odbc_has_unicode}); local $dbh->{LongReadLen} = $max; my $row = $dbh->selectall_arrayref(q/select a from DBD_ODBC_drop_me/); $ev = $@; diag($ev) if $ev; ok(!$ev, 'select test data back'); my $rc = is(length($row->[0]->[0]), length($value), "sizes of insert/select compare"); SKIP: { skip "sizes do not match", 1 unless $rc; is($row->[0]->[0], $value, 'data read back compares'); }; return; } DBD-ODBC-1.61/t/rt_61370.t0000755000175000017500000001007512254015275013733 0ustar martinmartin#!/usr/bin/perl -w -I./t # # rt 61370 # # Check DBD::ODBC handles MS SQL Server XML column type as Unicode # and that set magic is used internally to ensure length() returns the # correct value. # use Test::More; use strict; eval "require Test::NoWarnings"; my $has_test_nowarnings = ($@ ? 
undef : 1); #my $has_test_more_utf8 = 1; #eval "require Test::More::UTF8"; #$has_test_more_utf8 = undef if $@; binmode Test::More->builder->output, ":utf8"; binmode Test::More->builder->failure_output, ":utf8"; binmode STDOUT, ':utf8'; use DBI qw(:sql_types); use_ok('ODBCTEST'); my $dbh; BEGIN { if (!defined $ENV{DBI_DSN}) { plan skip_all => "DBI_DSN is undefined"; } } END { if ($dbh) { eval { local $dbh->{PrintWarn} = 0; local $dbh->{PrintError} = 0; $dbh->do(q/drop table PERL_DBD_RT_61370/); }; } Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings); done_testing(); } $dbh = DBI->connect(); unless($dbh) { BAIL_OUT("Unable to connect to the database $DBI::errstr\nTests skipped.\n"); exit 0; } $dbh->{RaiseError} = 1; $dbh->{ChopBlanks} = 1; my ($txt_de, $txt_ru); { use utf8; $txt_de = 'Käse'; $txt_ru = 'Москва'; } my $dbms_name = $dbh->get_info(17); ok($dbms_name, "got DBMS name: $dbms_name"); # 2 my $dbms_version = $dbh->get_info(18); ok($dbms_version, "got DBMS version: $dbms_version"); # 3 my $driver_name = $dbh->get_info(6); ok($driver_name, "got DRIVER name: $driver_name"); # 4 my $driver_version = $dbh->get_info(7); ok($driver_version, "got DRIVER version $driver_version"); # 5 my ($ev, $sth); # this needs to be MS SQL Server and not the OOB driver if ($dbms_name !~ /Microsoft SQL Server/) { note('Not Microsoft SQL Server'); exit 0; } if ($driver_name =~ /esoobclient/) { note("Easysoft OOB"); exit 0; } if (!$dbh->{odbc_has_unicode}) { note('DBD::ODBC not built with unicode support'); exit 0; } eval { local $dbh->{PrintWarn} = 0; local $dbh->{PrintError} = 0; $dbh->do('drop table PERL_DBD_RT_61370'); }; # try and create a table with an XML column # if we cannot, we'll have to assume your SQL Server is too old # and skip the rest of the tests eval { $dbh->do('create table PERL_DBD_RT_61370 (a int primary key, b xml)'); }; $ev = $@; if ($@) { note("Failed to create test table with XML type - server too old and perhaps does not support XML column type ($ev)"); done_testing; exit 0; } pass('created test table'); eval { $sth = $dbh->prepare('INSERT into PERL_DBD_RT_61370 VALUES (?,?)'); }; $ev = $@; diag($ev) if $ev; ok(!$ev, 'prepare insert'); SKIP: { skip "Failed to prepare xml insert - $@", 8 if $ev; my @rowdata = ([1, "$txt_de"], [2, "$txt_ru"]); $ev = undef; foreach my $row(@rowdata) { $sth->bind_param(1, $row->[0]); $sth->bind_param(2, $row->[1]); eval {$sth->execute}; if ($@) { $ev = $@; fail('execute for insert'); # 1,2 } else { pass('execute for insert'); # 1,2 } } SKIP: { skip "Could not insert test data - $@", 6 if $ev; $sth = $dbh->prepare(q/select a,b from PERL_DBD_RT_61370 order by a/); ok($sth, 'prepare for select'); # 1 ok($sth->execute, 'execute for select'); # 2 $sth->bind_col(1, \my $pkey); # the SQL_WCHAR in the below call does nothing from DBD::ODBC 1.38_1 # as it became the deault and you cannot override the bind type: $sth->bind_col(2, \my $xml, {TYPE => SQL_WCHAR}); foreach my $row(@rowdata) { $sth->fetch; #diag(sprintf("%3u %s", length($row->[1]), $row->[1])); is($pkey, $row->[0], 'inserted/selected pkey match'); is($xml, $row->[1], 'inserted/selected strings match'); # 3,5 is(length($xml), length($row->[1]), 'inserted/selected string sizes match'); # 4,6 } }; }; eval { local $dbh->{PrintWarn} = 0; local $dbh->{PrintError} = 0; $dbh->do('drop table PERL_DBD_RT_61370'); }; DBD-ODBC-1.61/t/UChelp.pm0000644000175000017500000000217212250310263014062 0ustar martinmartinpackage UChelp; #use base 'Exporter'; use Test::More; BEGIN { use Exporter(); @ISA = 
qw(Exporter); @EXPORT=qw(&dumpstr &utf_eq_ok); } use strict; use warnings; sub dumpstr($) { my $str=shift; if (defined $str) { my ($f,$u)=utf8::is_utf8($str) ? ('\\x{%04X}','utf8') : ('\\x%02X','bytes'); (my $d=$str)=~s/([^\x20-\x7E])/sprintf($f,ord $1)/gse; return sprintf("[%s, %i chars] '%s'",$u,length($str),$d); } else { return 'undef'; } } sub utf_eq_ok($$$) { my ($a,$b,$msg)=@_; # I want to call Test::More routines in a way that makes this package invisible, # and shows the failed or passed line of the caller instead. # So I manipulate @_ and use goto \&func. (!defined($a) and !defined($b)) and return pass($msg); unless (defined($a) and defined($b)) { diag(defined($a) ? "Expected undef, got '$a'" : "Got undef, expected '$b'"); @_=($msg); goto \&fail; # see below for the reason of goto } if ($a eq $b) { @_=($msg); goto \&pass; } if ("\x{2a36}$a" eq "\x{2a36}$b") { # implicit upgrade to UTF8 @_=($msg); goto \&pass; } @_=(dumpstr($a),'eq',dumpstr($b),$msg); goto \&cmp_ok; } 1; DBD-ODBC-1.61/t/rt_43384.t0000755000175000017500000000377612254015253013746 0ustar martinmartin#!/usr/bin/perl -w -I./t use Test::More; use strict; $| = 1; my $has_test_nowarnings = 1; eval "require Test::NoWarnings"; $has_test_nowarnings = undef if $@; my $tests = 8; $tests += 1 if $has_test_nowarnings; plan tests => $tests; use DBI qw(:sql_types); use_ok('ODBCTEST'); #use_ok('Data::Dumper'); my $dbh; BEGIN { plan skip_all => "DBI_DSN is undefined" if (!defined $ENV{DBI_DSN}); } END { if ($dbh) { eval { local $dbh->{PrintWarn} = 0; local $dbh->{PrintError} = 0; $dbh->do(q/drop table PERL_DBD_rt_43384/); }; $dbh->disconnect; } Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings); } $dbh = DBI->connect(); unless($dbh) { BAIL_OUT("Unable to connect to the database $DBI::errstr\nTests skipped.\n"); exit 0; } my $sth; my $dbname = $dbh->get_info(17); # DBI::SQL_DBMS_NAME SKIP: { skip "Microsoft Access tests not supported using $dbname", 7 unless ($dbname =~ /Access/i); eval { local $dbh->{PrintWarn} = 0; local $dbh->{PrintError} = 0; $dbh->do(q/drop table PERL_DBD_rt_43384/); }; pass('dropped test table'); eval {$dbh->do(q/create table PERL_DBD_rt_43384 (unicode_varchar text(200), unicode_text memo)/);}; my $ev = $@; ok(!$ev, 'created test table PERL_DBD_rt_43384'); SKIP: { skip 'failed to create test table', 2 if $ev; my $sth = $dbh->prepare(q/insert into PERL_DBD_rt_43384 values(?,?)/); ok($sth, 'insert prepared'); SKIP: { skip 'failed to prepare', 1 if !$sth; my $data = 'a' x 190; eval {$sth->execute($data, $data);}; $ev = $@; ok(!$ev, 'inserted into test table'); ok ($sth->bind_param(1, $data, {TYPE => SQL_VARCHAR})); ok ($sth->bind_param(2, $data, {TYPE => SQL_LONGVARCHAR})); eval {$sth->execute;}; $ev = $@; ok(!$ev, "inserted into test table with VARCHAR and LONGVARCHAR"); }; }; }; exit 0; DBD-ODBC-1.61/t/87_odbc_lob_read.t0000755000175000017500000000503712456454061015630 0ustar martinmartin#!/usr/bin/perl -w -I./t use strict; use warnings; use DBI qw(:sql_types); use Test::More; my $has_test_nowarnings = 1; eval "require Test::NoWarnings"; $has_test_nowarnings = undef if $@; my $dbh; my $bind_string = "frederickfrederick"; BEGIN { if (!defined $ENV{DBI_DSN}) { plan skip_all => "DBI_DSN is undefined"; } } my $not_sql_server; END { if ($dbh) { local $dbh->{PrintError} = 0; local $dbh->{PrintWarn} = 0; eval { $dbh->do(q/drop table DBD_ODBC_LOB_TEST/); }; } Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings && !$not_sql_server); done_testing(); } my $h = DBI->connect(); unless($h) { 
BAIL_OUT("Unable to connect to the database ($DBI::errstr)\nTests skipped.\n"); exit 0; } $h->{RaiseError} = 0; $h->{PrintError} = 0; my $dbname = $h->get_info(17); # DBI::SQL_DBMS_NAME unless ($dbname =~ /Microsoft SQL Server/i) { $not_sql_server = 1; note("Not MS SQL Server"); plan skip_all => "Not MS SQL Server"; } eval { $h->do(q/drop table DBD_ODBC_LOB_TEST/); }; eval { $h->do(q/create table DBD_ODBC_LOB_TEST(a image)/); } or BAIL_OUT("Failed to create test table $@"); my $s = $h->prepare(q/insert into DBD_ODBC_LOB_TEST (a) values(?)/); ok($s, "Created test table"); $s->bind_param(1, $bind_string, {TYPE => SQL_BINARY}); ok($s->execute, "inserted test data") or BAIL_OUT($DBI::errstr); ok($s = $h->prepare(q/select a from DBD_ODBC_LOB_TEST/), "preparing select") or BAIL_OUT("cannot select test data $DBI::errstr"); ok($s->execute, "executing select") or BAIL_OUT("execute $DBI::errstr"); ok($s->bind_col(1, undef, {TreatAsLOB => 1}), "binding"); ok($s->fetch, "fetching"); getit($s, SQL_BINARY); $s->execute; $s->fetch; getit($s, SQL_BINARY); sub getit{ my ($s, $type) = @_; my $total = 0; my $first = 1; my $fetched = ''; my $len; while($len = $s->odbc_lob_read(1, \my $x, 8, {TYPE => $type})) { if ($first) { if ($type == SQL_BINARY) { is($len, 8, "correct chunk size"); } else { is($len, 7, "correct chunk size"); } } #diag("len=$len, x=$x, ", length($x)); $total += $len; $first = 0; $fetched .= $x; } is($len, 0, "0 at end"); is($total, length($bind_string), "received correct amount of bytes"); is($fetched, $bind_string, "data correct"); my $x; $len = $s->odbc_lob_read(1, \$x, 8); is($len, 0, "0 at end after another read"); } $s->finish; $h->disconnect; DBD-ODBC-1.61/t/rt_81911.t0000755000175000017500000000516012254015321013725 0ustar martinmartin#!/usr/bin/perl -w -I./t # # rt 81911 # # New odbc_rows method and change to silently truncating affected rows # from execute. Can't think of a reasonable way of testing the latter as # I cannot imagine anyone wants millions of rows inserting into their # database during testing. # use Test::More; use strict; use DBI; use_ok('ODBCTEST'); eval "require Test::NoWarnings"; my $has_test_nowarnings = ($@ ? undef : 1); my $dbh; BEGIN { if (!defined $ENV{DBI_DSN}) { plan skip_all => "DBI_DSN is undefined"; } } END { if ($dbh) { local $dbh->{PrintWarn} = 0; local $dbh->{PrintError} = 0; $dbh->do(q/drop table PERL_DBD_RT_81911/); } Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings); done_testing(); } $dbh = DBI->connect(); unless($dbh) { BAIL_OUT("Unable to connect to the database $DBI::errstr\nTests skipped.\n"); exit 0; } $dbh->{RaiseError} = 0; $dbh->do(q/create table PERL_DBD_RT_81911 (a int)/) or BAIL_OUT("Failed to create test table " . $dbh->errstr); # insert one row and check my $s = $dbh->prepare(q/insert into PERL_DBD_RT_81911 values(?)/) or BAIL_OUT("Failed to prepare insert " . $dbh->errstr); my $affected = $s->execute(1) or BAIL_OUT("Failed to execute insert " . $dbh->errstr); is($affected, 1, "affected from execute insert"); is($s->odbc_rows, $affected, "execute and odbc_rows agree on insert"); # insert a second row and check $affected = $s->execute(2) or BAIL_OUT("Failed to execute insert 2 " . $dbh->errstr); is($affected, 1, "affected from execute insert"); is($s->odbc_rows, $affected, "execute and odbc_rows agree on insert 2 "); # test update with no rows affected $s = $dbh->prepare(q/update PERL_DBD_RT_81911 set a = 1 where a = ?/) or BAIL_OUT("Failed to prepare update " . 
$dbh->errstr); $affected = $s->execute(3) or BAIL_OUT("Failed to execute update " . $dbh->errstr); is($affected, '0E0', "affected from execute update none"); is($s->odbc_rows, $affected, "execute and odbc_rows agree on update none"); # test update with 1 row affected $affected = $s->execute(1) or BAIL_OUT("Failed to execute update " . $dbh->errstr); is($affected, 1, "affected from execute update 1"); is($s->odbc_rows, $affected, "execute and odbc_rows agree on update 1"); # test update with 2 rows affected $s = $dbh->prepare(q/update PERL_DBD_RT_81911 set a = 1 where a > 0/) or BAIL_OUT("Failed to prepare update 2 " . $dbh->errstr); $affected = $s->execute or BAIL_OUT("Failed to execute update 2" . $dbh->errstr); is($affected, 2, "affected from execute update 2"); is($s->odbc_rows, $affected, "execute and odbc_rows agree on update 2"); DBD-ODBC-1.61/t/50_odbc_utf8_on.t0000755000175000017500000000106512254015060015412 0ustar martinmartin#!/usr/bin/perl -w -I./t use Test::More; use strict; $| = 1; plan tests => 3; use DBI; my $dbh; BEGIN { plan skip_all => "DBI_DSN is undefined" unless($ENV{DBI_DSN}); } $dbh = DBI->connect(); unless($dbh) { BAIL_OUT("Unable to connect to the database $DBI::errstr\nTests skipped.\n"); exit 0; } ok(exists($dbh->{odbc_utf8_on}), "odbc_utf8_on exists with value=$dbh->{odbc_utf8_on}"); is($dbh->{odbc_utf8_on}, 0, "odbc_utf8_on is off by default"); $dbh->{odbc_utf8_on} = 1; is($dbh->{odbc_utf8_on}, 1, "odbc_utf8_on set value=$dbh->{odbc_utf8_on}"); DBD-ODBC-1.61/t/rt_46597.t0000755000175000017500000001204612254015256013750 0ustar martinmartin#!/usr/bin/perl -w -I./t use Test::More; use strict; $| = 1; my $has_test_nowarnings = 1; eval "require Test::NoWarnings"; $has_test_nowarnings = undef if $@; my $tests = 6; $tests += 1 if $has_test_nowarnings; plan tests => $tests; use DBI qw(:sql_types); use_ok('ODBCTEST'); # 1 #use_ok('Data::Dumper'); # 2 my $dbh; BEGIN { plan skip_all => "DBI_DSN is undefined" if (!defined $ENV{DBI_DSN}); } END { if ($dbh) { eval { local $dbh->{PrintWarn} = 0; local $dbh->{PrintError} = 0; $dbh->do(q/drop table PERL_DBD_rt_46597/); }; $dbh->disconnect; } Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings); # 8 } $dbh = DBI->connect(); unless($dbh) { BAIL_OUT("Unable to connect to the database $DBI::errstr\nTests skipped.\n"); exit 0; } my $sth; $dbh->{RaiseError} = 1; my $dbname = $dbh->get_info(17); # DBI::SQL_DBMS_NAME SKIP: { skip "Microsoft Access tests not supported using $dbname", 5 unless ($dbname =~ /Access/i); eval { local $dbh->{PrintWarn} = 0; local $dbh->{PrintError} = 0; $dbh->do(q/drop table PERL_DBD_rt_46597/); }; pass('dropped test table'); # 3 eval {$dbh->do(q{CREATE TABLE PERL_DBD_RT_46597 (Sequence memo)})}; my $data = 
"GAAGCGGGATGAGCTCAAACTTAAAATCTCTGTTGCTTGCAACAGCGAATTGTAGTCTCGAGAAGCGTTTTCAAGGCGGATGCACAGTGCTCAAGTTGCTTGGAACGGCACATCGTAGAGGGTGACAATCCCGTACGTGGCACTGTGTACTGTTCACGATTCGCTTTCTATGAGTCGGGTTGCTTGGGAATGCAGCCCAAAATGGGAGGTAAACTCCTTCTAAAGCTAAATATTGGCACGAGACCGATAGCGAACAAGTACCGTGAGGGAAAGATGAAAAGCACTTTGAAAAGAAAGTTAATAGTACGTGAAACCGTTAGTAGGGAAGCGCATGGAATTAGCAATACACTGTCGAGATTCAGGCGGGCGGCGATTGGTACGGCTGTTGTACGGATCTGAATGGACCGTCGGTGGTCGTCACTGGTTGCTGCCTGTTGCATTTCCCGGCAGTGTTCGTCAACAGGTGTTGGAACCGAGCGATAAGCCCCGCAGGAAGGTGGCTGGCTTCGGCTAGTGTTATAGCCTGTGGTGTGCGAGCTCGGGTCCGACAGAGGGGTTGCGGCACATGCTCTTTTGGGCTGGTCTCGTCTCTCTCGGCTGGTTGTCGACTATGGCGGACTGCGTGCAGTGCGCTTGAACTGCTGCCGGTCGTCGAGGGGCATCGGACACACATTGTGCCAAGGTTGTTGGCGGTCATATGGTTTCATACGACCCGTCTTGAAACACGGACCAAGGAGTCTAACATGTGTGCGAGTCTTTGGGTGATCGAAACCCGCAGGCACAATGAAAGTAAAGGCTGCTTGCAGCTGAAGTGAGATCTCCCGGTCTCGGCCGGGGGCGCATCATTGACCGACCTATTCTACTCCTAGAAAGGTTTGAGTAAGAGCACATCTGTTGGGACCCGAAAGATGGTGAACTATGCTTGAGTAGGGCGAAGCCAGAGGAAACTCTGGTGGAGGCTCGTAGCGATTCTGACGTGCAAATCGATCGTCAAACTTGAGTATAGGGGCGAAAGACTAATCGAACCATCTAGTAGCTGGTTCCCTCCGAAGTTTCCCTTAGGATAGCTGGAACTCGGAACAGTTTTATCAGGTAAAGCGAATGATTAGAGGTCTTAGGATTGAAACAATCTTAACCTATTCTCAAACTTTAAATTGGTAAGAAGCCCGGCTTGCTTAACTGAAGCAGGGCACAGAATGAGAGTTCTTAGTGGGCCATTTTTGGTAAGCAGAACTGGCGATGCGGGATGAACCGAACGCTGAGTTAAGGCGTCTAAATCGACGCTCATCAGACCCCACAAAAGGTGTTGGTTGATCTAGACAGCAGGACGGTGGCCATGGAAGTCGGAATCCGCTAAGGAGTGTGTAACAACCCACCTGCCGAATCAACTAGCCCTGAAAATGGATGACGCTCAAGCGTCGTGCCTATACTCAGCCGTCAACGTAAATAGCGAAGCGTTGACGAGTAGGAGGGCGTGGGGATCGTGACGCAGCCTTTGGCGTGAGCCTGGGTGAAACGGTCTCTAGTGAAGATCTTGGTGGTAGTAGCAAATATTCAAATGAGAACTTTGAAGACCGAAGTGGAGAAAGGTTCCATGTGAACAGCAGTTGGACATGGGTTAGTCGATCCTAAGAGATAGGGAAACTCCGTTTCAAAGTGTCCGATCTTGGACCGTTTATCGAAAGGGAATCGGGTTAATATTCCCGAACCAGAACGCGGATATTCTAGCCTCTCGGGGTTAGATGTGCGGTAACGCAACTGAACTCGGAGACGTCGGCAGGGGCCCTGGGAAGAGTTCTCTTTTCTTGTTAACGACCTGACACCATGGAATCTGATTGCCAGGAGATATGGTTTGATGGTCGGTAAAGCACCACACTTCTTGTGGTGTCCGGTGCGCTCCTGAAGGCCCTTGAAAATCCGAGGGAAAGATTGATTTTCGCGTCTGTTCGTACTCATAACCGCAGCAGGTCTCCAAGGTGAGCAGCCTCTGGTCGATAGAACAATGTAGGTAAGGGAAGTCGGCAAAATAGATCCGTAACTTCGGGAAAAGGATTGGCTCTAAGGATTGGGTCTGTCGGGCTGAGACTTGAAGCGGGCGGCACCGACTCGGACTGGCTGTGGCCTCTCGGGGCTATGGTTGGACTGGGAAGGAACTGCGCGTGGATTGGCCCAGCTATGCTCGCAAGAGCAGTTCGGCAGGCAATTAACAATCAACTTAGAACTGGTACGGACAAGGGGAATCCGACTGTTTAATTAAAACAAAGCATTGCGATGGCCGGAAACGGTGTTGACGCAATGTGATTTCTGCCCAGTGCTCTGAATGTCAAAGTGAAGAAATTCAACCAAGCGCGGGTAAACGGCGGGAGTAACTATGACTCTCTTAAGGTAGCCAAATGCCTCGTCATCTAATTAGTGACGCGCATGAATGGATTAACGAGATTCCCACTGTCCCTATCTACTATCTAGCGAAACCACAGCCAAGGGAACGGGCTTGGCAAAATCAGCGGGGAAAGAAGACCCTGTTGAGCTTGACTCTAGTCTGACTCTGTGAAAAGACATAGGAGGTGTAGAATAGGTGGGAGCAGCAATGCAACAGTGAAATACCACTACTCTTATAGTTTTTTTACTTATTCGATTGAGCGGAAGCGAGCTTCACGGCTCATTTTCTAGAATTAAGGCCCCATTGGCGGGTCGATCCGTGTCGAAGACACTGTCAGGTTGGGAGTTTGGCTGGGGCGGCACATCTGTCAAATGATAACGCAGGTGTCCTAAGGTGAGCTCAATGAGAACGGAAATCTCATGTAGAACAAAAGGGTAAAAGCTCACTTGATTTTGATTTTCAGTATGAATACAAACTGTGAAAGCATGGCCTATCGATCCTTTAGTCTTTAGGAGTTTTAAGCTAGAGGTGTCAGAAAAGTTACCACAGGGATAACTGGCTTGTGGCAGCCAAGCGTTCATAGCGACGTTGCTTTTTGATCCTTCGATGTCGGCTCTTCCTATCATTGTGAAGCAGAATTCACCAAGTGTTGGATTGTTCACCCACTAATAGGGAACGTGAGCTGGGTTTAGACCGTCGTGAGACAGGTTAGTTTTACCCTACTGATGAAGTGTTGTTGCAATAGTAATTCTGCTCAGTACGAGAGGAACCGCAGATTCAGACAATTGGCATTTGCACTTGCTTGAAAAGGCAATGGTG"; my $ev = $@; ok(!$ev, 'created test table PERL_DBD_rt_46597'); # 4 SKIP: { skip 'failed to create test table', 3 if $ev; my $sth = $dbh->prepare( q{INSERT INTO PERL_DBD_rt_46597 values (?)}) || die ($DBI::errstr); ok($sth, 'insert prepared'); # 5 SKIP: { skip 'failed to prepare', 2 if !$sth; ok($sth->bind_param(1, $data, DBI::SQL_LONGVARCHAR), 
'parameter bound'); # 6 eval {$sth->execute($data)}; $ev = $@; ok(!$ev, "inserted into test table with sticky parameter type"); # 7 }; }; }; exit 0; DBD-ODBC-1.61/t/07bind.t0000755000175000017500000001461612254015007013627 0ustar martinmartin#!/usr/bin/perl -w -I./t use Test::More; $| = 1; my $has_test_nowarnings = 1; eval "require Test::NoWarnings"; $has_test_nowarnings = undef if $@; my $tests = 25; $tests += 1 if $has_test_nowarnings; plan tests => $tests; # use_ok('DBI', qw(:sql_types)); # can't seem to get the imports right this way use DBI qw(:sql_types); use_ok('ODBCTEST'); BEGIN { if (!defined $ENV{DBI_DSN}) { plan skip_all => "DBI_DSN is undefined"; } } END { Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings); } my $dbh = DBI->connect(); unless($dbh) { BAIL_OUT("Unable to connect to the database $DBI::errstr\nTests skipped.\n"); exit 0; } $rc = ok(ODBCTEST::tab_create($dbh), "Create tables"); my @data = ( [ 1, 'foo', 'foo varchar', "1998-05-13", "1998-05-13 00:01:00" ], [ 2, 'bar', 'bar varchar', "1998-05-14", "1998-05-14 00:01:00" ], [ 3, 'bletch', 'bletch varchar', "1998-05-15", "1998-05-15 00:01:00" ], [ 4, 'bletch4', 'bletch varchar', "1998-05-15", "1998-05-15 00:01:00.1" ], [ 5, 'bletch5', undef, "1998-05-15", "1998-05-15 00:01:00.23" ], [ 6, '', '', "1998-05-15", "1998-05-15 00:01:00.233" ], ); my $longstr = "This is a test of a string that is longer than 80 characters. It will be checked for truncation and compared with itself."; my $longstr2 = $longstr . " " . $longstr; my $longstr3 = $longstr2 . " " . $longstr2; my @data_long = ( [ 10, 'foo2', $longstr, "2000-05-13", "2000-05-13 00:01:00" ], [ 11, 'bar2', $longstr2, "2000-05-14", "2000-05-14 00:01:00" ], [ 12, 'bletch2', $longstr3, "2000-05-15", "2000-05-15 00:01:00" ], ); my $tab_insert_ok = 1; $rc = ODBCTEST::tab_insert_bind($dbh, \@data, 1); ok($rc, "Table insert test"); unless ($rc) { diag("Test 4 is known to fail often. It is not a major concern. It *may* be an indication of being unable to bind datetime values correctly.\n"); $tab_insert_ok = 0; # print "not " } $dbh->{LongReadLen} = 2000; is($dbh->{LongReadLen}, 2000, "Ensure long readlen set correctly"); $rc = tab_select($dbh, \@data); ok($rc, "Select tests"); $rc = ODBCTEST::tab_insert_bind($dbh, \@data_long, 1); ok($rc, "Insert with bind tests"); unless ($rc) { if ($tab_insert_ok) { diag("Since test #4 succeeded, this could be indicative of a problem with long inserting, with binding parameters.\n"); } else { diag("Since test #4 failed, this could be indicative of a problem with date time binding, as per #4 above.\n"); } } $rc = tab_select($dbh, \@data_long); ok($rc, "select long test data"); $rc = tab_update_long($dbh, \@data_long); ok($rc, "update long test data"); $rc = tab_select($dbh, \@data_long); ok($rc, "select long test data again"); # clean up! $rc = ODBCTEST::tab_delete($dbh); # test param values! 
my $sth = $dbh->prepare("insert into $ODBCTEST::table_name (COL_A, COL_C) values (?, ?)"); $sth->bind_param(1, 1, SQL_INTEGER); $sth->bind_param(2, "test", SQL_LONGVARCHAR); my $ref = $sth->{ParamValues}; is(ref($ref), 'HASH', 'ParamValues returns a hash ref'); is($ref->{1}, 1, "ParamValues test integer"); is($ref->{2}, "test", "Paramvalues test string"); # test param types $ref = $sth->{ParamTypes}; is(ref($ref), 'HASH', 'ParamValues returns a hash ref'); ok(exists($ref->{1}), 'p1 exists'); ok(exists($ref->{1}), 'p2 exists'); is(ref($ref->{1}), 'HASH', 'p1 is a hash reference'); is(ref($ref->{2}), 'HASH', 'p2 is a hash reference'); ok(exists($ref->{1}->{TYPE}), 'p1 TYPE exists'); ok(exists($ref->{2}->{TYPE}), 'p2 TYPE exists'); like($ref->{1}->{TYPE}, qr/^-?\d+$/, 'numeric SQL type on p1'); like($ref->{2}->{TYPE}, qr/^-?\d+$/, 'numeric SQL type on p2'); # test numbered parameters eval { $dbh->do("delete from $ODBCTEST::table_name"); $sth = $dbh->prepare( "insert into $ODBCTEST::table_name(COL_A, COL_C) values (:1, :2)"); $sth->bind_param("1", 1); $sth->bind_param("2", 2); $sth->execute; }; my $ev = $@; diag($ev) if $ev; ok(!$ev, 'insert with numbered placeholders'); is($sth->rows, 1, '...inserted one row'); # test named parameters eval { $dbh->do("delete from $ODBCTEST::table_name"); $sth = $dbh->prepare( "insert into $ODBCTEST::table_name(COL_A, COL_C) values (:three, :four)"); $sth->bind_param("three", 3); $sth->bind_param("four", 4); $sth->execute; }; $ev = $@; diag($ev) if $ev; ok(!$ev, 'insert with named placeholders'); is($sth->rows, 1, '...inserted one row'); # how to test "sticky" bind_param? # how about setting ODBC default bind_param to some number # then # clean up! $rc = ODBCTEST::tab_delete($dbh); exit(0); print $DBI::errstr; sub tab_select { my $dbh = shift; my $dref = shift; my @data = @{$dref}; my @row; my $dbname; $dbname = $dbh->get_info(17); # SQL_DBMS_NAME my $sth = $dbh->prepare("SELECT COL_A,COL_B,COL_C,COL_D FROM $ODBCTEST::table_name WHERE COL_A = ?") or return undef; my $bind_val; foreach (@data) { $bind_val = $_->[0]; $sth->bind_param(1, $bind_val, SQL_INTEGER); $sth->execute; while (@row = $sth->fetchrow()) { # print "$row[0]|$row[1]|$row[2]|\n"; if ($row[0] != $bind_val) { print "Bind value failed! bind value = $bind_val, returned value = $row[0]\n"; return undef; } if (!defined($row[2]) && !defined($_->[2])) { # ok... } else { if (!defined($row[2] && $dbname =~ /Oracle/)) { # Oracle typically treats empty blanks as NULL in varchar, so that's what we should # expect! $row[2] = ""; } if ($row[2] ne $_->[2]) { print "Column C value failed! bind value = $bind_val, returned values = $row[0]|$row[1]|$row[2]|$row[3]\n"; return undef; } } } } return 1; } sub tab_update_long { my $dbh = shift; my $dref = shift; my @data = @{$dref}; my $sth = $dbh->prepare(<<"/"); UPDATE $ODBCTEST::table_name SET COL_C = ? WHERE COL_A = ? / unless ($sth) { warn $DBI::errstr; return 0; } $sth->{PrintError} = 1; foreach (@data) { # change the data... $_->[2] .= " " . 
$_->[2]; my $row = ODBCTEST::get_type_for_column($dbh, 'COL_C'); $sth->bind_param(1, $_->[2], { TYPE => $row->{DATA_TYPE} }); $row = ODBCTEST::get_type_for_column($dbh, 'COL_A'); $sth->bind_param(2, $_->[0], { TYPE => $row->{DATA_TYPE} }); return 0 unless $sth->execute; } 1; } __END__ DBD-ODBC-1.61/t/45_unicode_varchar.t0000755000175000017500000001755712250310263016215 0ustar martinmartin#!/usr/bin/perl -w -I./t # # Test insertion into varchar columns using unicode and codepage chrs # Must be a unicode build of DBD::ODBC # Currently needs MS SQL Server # use open ':std', ':encoding(utf8)'; use Test::More; use strict; use Data::Dumper; $| = 1; use DBI qw(:utils); use DBI::Const::GetInfoType; my $has_test_nowarnings = 1; eval "require Test::NoWarnings"; $has_test_nowarnings = undef if $@; my $dbh; BEGIN { if ($] < 5.008001) { plan skip_all => "Old Perl lacking unicode support"; } elsif (!defined $ENV{DBI_DSN}) { plan skip_all => "DBI_DSN is undefined"; } } END { # tidy up if ($dbh) { local $dbh->{PrintError} = 0; local $dbh->{PrintWarn} = 0; eval { $dbh->do(q/drop table PERL_DBD_TABLE1/); }; } } # get the server, database and table collations sub collations { my ($h, $table) = @_; # so we can use :: not meaning placeholders $h->{odbc_ignore_named_placeholders} = 1; # get database name to use later when finding collation for table my $database_name = $h->get_info($GetInfoType{SQL_DATABASE_NAME}); diag "Database: ", $database_name; # now find out the collations # server collation: my $r = $h->selectrow_arrayref( q/SELECT CONVERT (varchar, SERVERPROPERTY('collation'))/); diag "Server collation: ", $r->[0], "\n"; # database collation: $r = $h->selectrow_arrayref( q/SELECT CONVERT (varchar, DATABASEPROPERTYEX(?,'collation'))/, undef, $database_name); diag "Database collation: ", $r->[0]; # now call sp_help to find out about our table # first result-set should be name, owner, type and create datetime # second result-set should be: # column_name, type, computed, length, prec, scale, nullable, trimtrailingblanks, # fixedlennullinsource, collation # third result-set is identity columns # fourth result-set is row guilded columns # there are other result-sets depending on the object # sp_help -> http://technet.microsoft.com/en-us/library/ms187335.aspx my $column_collation; diag "Calling sp_help for table:"; my $s = $h->prepare(q/{call sp_help(?)}/); $s->execute($table); my $result_set = 1; do { my $rows = $s->fetchall_arrayref; if ($result_set <= 2) { foreach my $row (@{$rows}) { diag join(",", map {$_ ? 
$_ : 'undef'} @{$row}); } } if ($result_set == 2) { foreach my $row (@{$rows}) { diag "column:", $row->[0], " collation:", $row->[9], "\n"; $column_collation = $row->[9]; } } $result_set++; } while $s->{odbc_more_results}; # now using the last column collation from above find the codepage $r = $h->selectrow_arrayref( q/SELECT COLLATIONPROPERTY(?, 'CodePage')/, undef, $column_collation); diag "Code page for column collation: ", $r->[0]; } # output various codepage information sub code_page { eval {require Win32::API::More}; if ($@) { diag("Win32::API::More not available"); return; } Win32::API::More->Import("kernel32", "UINT GetConsoleOutputCP()"); Win32::API::More->Import("kernel32", "UINT GetACP()"); my $cp = GetConsoleOutputCP(); diag "Current active console code page: $cp\n"; $cp = GetACP(); diag "active code page: $cp\n"; 1; } # given a string call diag to output the ord of each character sub ords { my $str = shift; use bytes; diag " ords of output string:"; foreach my $s(split(//, $str)) { diag sprintf("%x", ord($s)), ","; } } # read back the length of the data inserted according to the db and the data # inserted (although nothing is done with the latter right now). # given a perl expected length and a db expected length check them # given a hex string of bytes the data should look like when cast to a # binary check the inserted data matches what we expect. sub show_it { my ($h, $expected_perl_length, $expected_db_length, $hex) = @_; my $r = $h->selectall_arrayref(q/select len(a), a from PERL_DBD_TABLE1 order by b asc/); diag( Dumper($r)); foreach my $row(@$r) { is($row->[0], shift @{$expected_db_length}, "db character length") or diag("dsc: " . data_string_desc($row->[0])); if (!is(length($row->[1]), shift @{$expected_perl_length}, "expected perl length")) { diag(data_string_desc($row->[1])); ords($row->[1]); } } if ($hex) { foreach my $hex_val(@$hex) { $r = $h->selectrow_arrayref(q/select count(*) from PERL_DBD_TABLE1 where cast(a as varbinary(100)) = / . 
$hex_val); is($r->[0], 1, "hex comparison $hex_val"); } } $h->do(q/delete from PERL_DBD_TABLE1/); } # insert the string into the database # diag output info about the inserted data sub execute { my ($s, @strings) = @_; diag " INPUT:"; foreach my $string(@strings) { #diag " input string: $string"; diag " data_string_desc of input string: ", data_string_desc($string); diag " ords of input string: "; foreach my $s(split(//, $string)) { diag sprintf("%x,", ord($s)); } { diag " bytes of input string: "; use bytes; foreach my $s(split(//, $string)) { diag sprintf("%x,", ord($s)); } } } ok($s->execute(@strings), "execute"); } $dbh = DBI->connect(); unless($dbh) { BAIL_OUT("Unable to connect to the database $DBI::errstr\nTests skipped.\n"); exit 0; } my $driver_name = $dbh->get_info($GetInfoType{SQL_DRIVER_NAME}); diag "Driver: ", $driver_name; $dbh->{RaiseError} = 1; eval {local $dbh->{PrintWarn} =0; $dbh->{PrintError} = 0;$dbh->do(q/drop table PERL_DBD_TABLE1/)}; my $dbname = $dbh->get_info($GetInfoType{SQL_DBMS_NAME}); if ($dbname !~ /Microsoft SQL Server/i) { note "Not MS SQL Server"; plan skip_all => "Not MS SQL Server"; exit; } if (!$dbh->{odbc_has_unicode}) { note "Not a unicode build of DBD::ODBC"; plan skip_all => "Not a unicode build of DBD::ODBC"; exit 0; } if ($^O eq 'MSWin32') { if (!code_page()) { note "Win32::API not found"; } } eval { $dbh->do(q/create table PERL_DBD_TABLE1 (b integer, a varchar(100) collate Latin1_General_CI_AS)/); }; if ($@) { fail("Cannot create table with collation - $@"); done_testing(); exit 0; } collations($dbh, 'PERL_DBD_TABLE1'); my $sql = q/insert into PERL_DBD_TABLE1 (b, a) values(?, ?)/; my $s; # a simple unicode string my $unicode = "\x{20ac}\x{a3}"; diag "Inserting a unicode euro, utf8 flag on:\n"; $s = $dbh->prepare($sql); # redo to ensure no sticky params execute($s, 1, $unicode); show_it($dbh, [2], [2], ['0x80a3']); my $codepage; # a simple codepage string { use bytes; $codepage = chr(0xa3) .
chr(0x80); # it is important this is different to $unicode } diag "Inserting a codepage/bytes string:\n"; $s = $dbh->prepare($sql); # redo to ensure no sticky params execute($s, 1, $codepage); show_it($dbh, [2], [2], ['0xa380']); # inserting a mixture of unicode chrs and codepage chrs per row in same insert # unicode first - checks we rebind the 2nd parameter as SQL_CHAR diag "Inserting a unicode followed by codepage chrs:\n"; $s = $dbh->prepare($sql); # redo to ensure no sticky params execute($s, 1, $unicode); execute($s, 2, $codepage); show_it($dbh, [2,2], [2,2], ['0x80a3', '0x80a3']); # inserting a mixture of unicode chrs and codepage chrs per row in same insert # codepage first - checks we rebind the 2nd parameter SQL_WCHAR diag "Inserting codepage chrs followed by unicode:\n"; $s = $dbh->prepare($sql); # redo to ensure no sticky params execute($s, 1, $codepage); execute($s, 2, $unicode); show_it($dbh, [2,2], [2,2], ['0xa380', '0x80a3']); Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings); done_testing(); DBD-ODBC-1.61/t/rt_38977.t0000755000175000017500000001143612254015240013746 0ustar martinmartin#!/usr/bin/perl -w -I./t # # rt 38977 and 48304 # # test varbinary(MAX), varchar(MAX) and nvarchar(MAX) types in SQL Server # use Test::More; use strict; $| = 1; my $has_test_nowarnings = 1; eval "require Test::NoWarnings"; $has_test_nowarnings = undef if $@; my $tests = 14; $tests += 1 if $has_test_nowarnings; plan tests => $tests; # can't seem to get the imports right this way use DBI qw(:sql_types); use_ok('ODBCTEST'); # 1 my $dbh; BEGIN { if (!defined $ENV{DBI_DSN}) { plan skip_all => "DBI_DSN is undefined"; } } END { if ($dbh) { eval { local $dbh->{PrintWarn} = 0; local $dbh->{PrintError} = 0; $dbh->do(q/drop table PERL_DBD_RT_38977/); }; } Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings); } $dbh = DBI->connect(); unless($dbh) { BAIL_OUT("Unable to connect to the database $DBI::errstr\nTests skipped.\n"); exit 0; } $dbh->{RaiseError} = 1; my $dbms_name = $dbh->get_info(17); ok($dbms_name, "got DBMS name: $dbms_name"); # 2 my $dbms_version = $dbh->get_info(18); ok($dbms_version, "got DBMS version: $dbms_version"); # 3 my $driver_name = $dbh->get_info(6); ok($driver_name, "got DRIVER name: $driver_name"); # 4 my $driver_version = $dbh->get_info(7); ok($driver_version, "got DRIVER version $driver_version"); # 5 my ($ev, $sth); SKIP: { skip "not SQL Server", 9 if $dbms_name !~ /Microsoft SQL Server/; skip "Easysoft OOB", 9 if $driver_name =~ /esoobclient/; my $major_version = $dbms_version; $major_version =~ s/^(\d+)\..*$/$1/; #diag("Major Version: $major_version\n"); skip "SQL Server version too old", 9 if $major_version < 9; eval { local $dbh->{PrintWarn} = 0; local $dbh->{PrintError} = 0; $dbh->do('drop table PERL_DBD_RT_38977'); }; eval { $dbh->do('create table PERL_DBD_RT_38977 (a VARCHAR(MAX))'); }; $ev = $@; ok(!$ev, 'create test table with varchar(max)'); # 6 SKIP: { skip "Failed to create test table", 2 if ($ev); eval { $sth = $dbh->prepare('INSERT into PERL_DBD_RT_38977 VALUES (?)'); }; $ev = $@; ok($sth && !$@, "prepare insert"); # 7 SKIP: { skip "Failed to prepare", 1 if ($ev); my $x = 'x' x 500000; eval { $sth->execute($x); }; $ev = $@; ok(!$ev, "execute insert"); # 8 if ($ev) { diag("Execute for insert into varchar(max) failed with $ev"); diag(q/Some SQL Server drivers such as the native client 09.00.1399 / . q/driver fail this test with a HY104, "Invalid precision error". / . qq/You have driver $driver_name at version $driver_version. / . 
q/There is a free upgrade from Microsoft of the native client driver /. q/to 10.00.1600 which you will need if you intend to insert / . q/into varchar(max) columns./); } }; }; eval { local $dbh->{PrintWarn} = 0; local $dbh->{PrintError} = 0; $dbh->do('drop table PERL_DBD_RT_38977'); }; eval { $dbh->do('create table PERL_DBD_RT_38977 (a VARBINARY(MAX))'); }; $ev = $@; ok(!$ev, 'create test table with varbinary(max)'); # 9 SKIP: { skip "Failed to create test table", 2 if ($ev); eval { $sth = $dbh->prepare('INSERT into PERL_DBD_RT_38977 VALUES (?)'); }; $ev = $@; ok($sth && !$@, "prepare insert"); # 10 SKIP: { skip "Failed to prepare", 1 if ($ev); my $x = 'x' x 500000; ok($sth->execute($x), "execute insert"); }; }; eval { local $dbh->{PrintWarn} = 0; local $dbh->{PrintError} = 0; $dbh->do('drop table PERL_DBD_RT_38977'); }; eval { $dbh->do('create table PERL_DBD_RT_38977 (a NVARCHAR(MAX))'); }; $ev = $@; ok(!$ev, 'create test table with nvarchar(max)'); # 11 SKIP: { skip "Failed to create test table", 2 if ($ev); eval { $sth = $dbh->prepare('INSERT into PERL_DBD_RT_38977 VALUES (?)'); }; $ev = $@; ok($sth && !$@, "prepare insert"); # 12 SKIP: { skip "Failed to prepare", 1 if ($ev); my $x = 'x' x 4001; ok($sth->execute($x), "execute insert"); # 13 }; }; }; #my $ev; # #eval {$h->do('drop table binary_meta');}; #$h->do('create table binary_meta (doc_id INTEGER NOT NULL, meta_name VARCHAR (255), meta_value VARCHAR(MAX), meta_idx INTEGER, from_ver BIGINT, to_ver BIGINT)'); #my $s = $h->prepare('INSERT into binary_meta VALUES (?, ?, ?, ?, ?, ?)'); #my $x = 'x' x 5000000; #$s->execute(1, 'fred', $x, 1, 1, 1); DBD-ODBC-1.61/t/ODBCTEST.pm0000644000175000017500000002365713254216011014125 0ustar martinmartin# # Package ODBCTEST # # This package is a common set of routines for the DBD::ODBC tests. # This is a set of routines to create, drop and test for existence of # a table for a given DBI database handle (dbh). # # This set of routines currently depends greatly upon some ODBC meta-data. # The meta data required is the driver's native type name for various ODBC/DBI # SQL types. For example, SQL_VARCHAR would produce VARCHAR2 under Oracle and TEXT # under MS-Access. # # the SQL_TIMESTAMP may be dubious on many platforms, but SQL_DATE was not supported # under Oracle, MS SQL Server or Access. Those are pretty common ones. # require 5.004; { package ODBCTEST; use DBI qw(:sql_types); use Test::More; $VERSION = '0.01'; $table_name = "perl_dbd_test"; $longstr = "THIS IS A STRING LONGER THAN 80 CHARS. THIS SHOULD BE CHECKED FOR TRUNCATION AND COMPARED WITH ITSELF."; $longstr2 = $longstr . " " . $longstr . " " . $longstr . " " . $longstr; # really dumb work around: # MS SQL Server 2000 (MDAC 2.5 and ODBC driver 2000.080.0194.00) has a # bug if the column is named C, CA, or CAS and there is a call to # SQLDescribeParam... there is an error, referring to a syntax error near # keyword 'by'. I figured it's just best to rename the columns. # changed SQL_BIGINT below to -5, as DBI has removed that constant. # ODBC's value is -5.
%TestFieldInfo = ( 'COL_A' => [SQL_SMALLINT,-5, SQL_TINYINT, SQL_NUMERIC, SQL_DECIMAL, SQL_FLOAT, SQL_REAL, SQL_INTEGER], 'COL_B' => [SQL_VARCHAR, SQL_CHAR, SQL_WVARCHAR, SQL_WCHAR], 'COL_C' => [SQL_LONGVARCHAR, -1, SQL_WLONGVARCHAR, SQL_VARCHAR], 'COL_D' => [SQL_TYPE_TIMESTAMP, SQL_TYPE_DATE, SQL_DATE, SQL_TIMESTAMP ], ); sub get_type_for_column { my $dbh = shift; my $column = shift; my $type; my $type_info_all; # yes, you can pass an array of types to type_info: $type_info = $dbh->type_info($TestFieldInfo{$column}); if (!$type_info) { my $types = $dbh->type_info_all; foreach my $t (@$types) { next if ref($t) ne 'ARRAY'; diag(join(",", map{$_ ? $_ : "undef"} @$t). "\n"); } BAIL_OUT("Unable to find a suitable test type for field $column"); } return $type_info; } sub tab_create { my $dbh = shift; $dbh->{PrintError} = 0; eval { $dbh->do("DROP TABLE $table_name"); }; $dbh->{PrintError} = 1; my $drvname = $dbh->get_info(6); # driver name # trying to use ODBC to tell us what type of data to use my $fields = undef; my $f; foreach $f (sort keys %TestFieldInfo) { # print "$f: $TestFieldInfo{$f}\n"; $fields .= ", " unless !$fields; $fields .= "$f "; # print "-- $fields\n"; my $row = get_type_for_column($dbh, $f); $fields .= $row->{TYPE_NAME}; if ($row->{CREATE_PARAMS}) { if ($drvname =~ /OdbcFb/i) { # Firebird ODBC driver seems to be badly broken - for # varchars it reports max size of 32765 when it is 4000 if ($row->{TYPE_NAME} eq 'VARCHAR') { $fields .= "(4000)"; } } elsif ($drvname =~ /Pg/) { ## No lengths ever for TEXT! } elsif ($drvname =~ /lib.*db2/) { # in DB2 a row cannot be longer than the page size which is usually 32K # but can be as low as 4K if ($row->{TYPE_NAME} eq 'VARCHAR') { diag("This seems to be db2 and as far as I am aware, you cannot have a row greater than your page size. When I last looked db2 says a varchar can be 32672 but if we use that here the row will very likely be larger than your page size. Also, even if we reduce the varchar but keep it above 3962 db2 seems to complain so we mangle it here to 3962. It does not seem right to me that SQLGetTypeInfo says a varchar can be 32672 and then it is limited to 3962. If you know better, please let me know."); $fields .= "(3962)"; } } elsif (!exists($row->{COLUMN_SIZE})) { # Postgres 9 seems to omit COLUMN_SIZE # however see discussion at # http://www.postgresql.org/message-id/5315E336.40603@vmware.com and # http://www.postgresql.org/message-id/5315E622.2010904@ntlworld.com # try and use old ODBC 2 PRECISION value if (exists($row->{PRECISION})) { $fields .= '(' . $row->{PRECISION} . ')'; } else { $fields .= '(4000)'; note("WARNING Your ODBC driver is broken - DBI's type_info method should return a hashref containing a key of COLUMN_SIZE and we got " . 
join(",", sort keys %$row)); } } else { $fields .= "($row->{COLUMN_SIZE})" if ($row->{CREATE_PARAMS} =~ /LENGTH/i); $fields .= "($row->{COLUMN_SIZE},0)" if ($row->{CREATE_PARAMS} =~ /PRECISION,SCALE/i); } } if ($f eq 'COL_A') { $fields .= " NOT NULL PRIMARY KEY "; } # print "-- $fields\n"; } # diag("Using fields: $fields\n"); my $sql = "CREATE TABLE $table_name ($fields)"; #diag($sql); $dbh->do($sql) or diag("Failed to create table - ", $dbh->errstr); } sub tab_delete { my $dbh = shift; $dbh->do("DELETE FROM $table_name"); } sub tab_exists { my $dbh = shift; my (@rows, @row, $rc); $rc = -1; unless ($sth = $dbh->table_info()) { diag("Can't list tables: $DBI::errstr\n"); return -1; } # TABLE_QUALIFIER,TABLE_OWNER,TABLE_NAME,TABLE_TYPE,REMARKS while ($row = $sth->fetchrow_hashref()) { # XXX not fully true. The "owner" could be different. Need to check! # In Oracle, testing $user against $row[1] works, but does NOT in SQL Server. # SQL server returns the device and something else I haven't quite taken the time # to figure it out, since I'm not a SQL server expert. Anyone out there? # (mine returns "dbo" for the owner on ALL my tables. This is obviously something # significant for SQL Server...one of these days I'll dig... if ((lc($table_name) eq lc($row->{TABLE_NAME}))) { # and (uc($user) eq uc($row[1]))) # qeDBF driver returns null for TABLE_OWNER my $owner = $row->{TABLE_OWNER} || '(unknown owner)'; # diag("$owner.$row->{TABLE_NAME}\n"); $rc = 1; last; } } $sth->finish(); $rc; } # # show various ways of inserting data without binding parameters. # Note, these are not necessarily GOOD ways to # show this... # @tab_insert_values = ( [1, 'foo', 'foo varchar', "{d '1998-05-11'}", "{ts '1998-05-11 00:00:00'}"], [2, 'bar', 'bar varchar', "{d '1998-05-12'}", "{ts '1998-05-12 00:00:00'}"], [3, "bletch", "bletch varchar", "{d '1998-05-10'}", "{ts '1998-05-10 00:00:00'}"], [4, "80char", $longstr, "{d '1998-05-13'}", "{ts '1998-05-13 12:00:00'}"], [5, "gt250char", $longstr2, "{d '1998-05-14'}", "{ts '1998-05-14 00:00:00'}"], ); sub tab_insert { my $dbh = shift; # qeDBF needs a space after the table name! foreach (@tab_insert_values) { my $row = ODBCTEST::get_type_for_column($dbh, 'COL_D'); # print "TYPE FOUND = $row->{DATA_TYPE}\n"; my $sql = "INSERT INTO $table_name (COL_A, COL_B, COL_C, COL_D) VALUES (" . join(", ", $_->[0], $dbh->quote($_->[1]), $dbh->quote($_->[2]), $_->[isDateType($row->{DATA_TYPE}) ? 3 : 4]). 
")"; if ('Pg' eq $dbh->{Driver}{Name}) { $sql =~ s/{(?:ts|d) (.+?)}/$1/g; } #diag($sql); if (!$dbh->do($sql)) { diag($dbh->errstr); return 0; } } 1; } sub isDateType($) { my $type = shift; if ($type == SQL_DATE || $type == SQL_TYPE_DATE) { return 1; } else { return 0; } } sub tab_insert_bind { my $dbh = shift; my $dref = shift; my $handle_column_type = shift; my @data = @{$dref}; my $sth = $dbh->prepare("INSERT INTO $table_name (COL_A, COL_B, COL_C, COL_D) VALUES (?, ?, ?, ?)"); unless ($sth) { warn $DBI::errstr; return 0; } # $sth->{PrintError} = 1; foreach (@data) { my @row; if ($handle_column_type) { $row = ODBCTEST::get_type_for_column($dbh, 'COL_A'); # diag("Binding the value: $_->[0] type = $row->{DATA_TYPE}\n"); $sth->bind_param(1, $_->[0], { TYPE => $row->{DATA_TYPE}}); } else { $sth->bind_param(1, $_->[0]); } if ($handle_column_type) { $row = ODBCTEST::get_type_for_column($dbh, 'COL_B'); $sth->bind_param(2, $_->[1], { TYPE => $row->{DATA_TYPE} }); } else { $sth->bind_param(2, $_->[1]); } if ($handle_column_type) { $row = ODBCTEST::get_type_for_column($dbh, 'COL_C'); $sth->bind_param(3, $_->[2], { TYPE => $row->{DATA_TYPE} }); } else { $sth->bind_param(3, $_->[2]); } # print "SQL_DATE = ", SQL_DATE, " SQL_TIMESTAMP = ", SQL_TIMESTAMP, "\n"; $row = ODBCTEST::get_type_for_column($dbh, 'COL_D'); # diag("TYPE FOUND = $row[1]\n"); # if ($row[1] == SQL_TYPE_TIMESTAMP) { # $row[1] = SQL_TIMESTAMP; #} # print "Binding the date value: \"$_->[$row[1] == SQL_DATE ? 3 : 4]\"\n"; if ($handle_column_type) { $sth->bind_param(4, $_->[isDateType($row->{DATA_TYPE}) ? 3 : 4], { TYPE => $row->{DATA_TYPE} }); } else { # sigh, couldn't figure out how to get rid of the warning nicely, # so I turned it off!!! Now, I have to turn it back on due # to problems in other perl versions. $sth->bind_param(4, $_->[isDateType($row->{DATA_TYPE}) ? 3 : 4]); } return 0 unless $sth->execute; } 1; } 1; } DBD-ODBC-1.61/t/pod.t0000755000175000017500000000145712254015234013327 0ustar martinmartin#!perl use Test::More; use strict; use warnings; my $has_test_nowarnings = 1; eval "require Test::NoWarnings"; $has_test_nowarnings = undef if $@; END { Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings); } my $basic_tests = 4; my $tests = $basic_tests; $tests += 1 if $has_test_nowarnings; plan tests => $tests; eval { require Test::Pod; Test::Pod->import; }; SKIP: { if (($@) || ($Test::Pod::VERSION < '1.00')) { skip "Test::Pod 1.00 required for testing POD", $basic_tests; } if (! -d '.git') { skip "Author test", $basic_tests; } my @pods = all_pod_files(); #diag("pod files: " . join(",", @pods)); foreach my $pod (@pods) { next if $pod !~ /(ODBC.pm)|(FAQ.pm)|(Changes.pm)|(TO_DO.pm)/; pod_file_ok($pod); } }; DBD-ODBC-1.61/t/odbc_describe_parameter.t0000755000175000017500000001063412254015220017344 0ustar martinmartin#!/usr/bin/perl -w -I./t # # Test odbc_describe_parameters # Should default to on but you can turn it off in the prepare or at the # connection level. 
# use Test::More; use strict; $| = 1; my $has_test_nowarnings = 1; eval "require Test::NoWarnings"; $has_test_nowarnings = undef if $@; my $tests = 17; $tests += 1 if $has_test_nowarnings; plan tests => $tests; use DBI qw(:sql_types); #1 use_ok('ODBCTEST'); my $dbh; BEGIN { if (!defined $ENV{DBI_DSN}) { plan skip_all => "DBI_DSN is undefined"; } } END { if ($dbh) { eval { local $dbh->{PrintWarn} = 0; local $dbh->{PrintError} = 0; $dbh->do(q/drop table PERL_DBD_drop_me/); }; $dbh->disconnect; } Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings); } $dbh = DBI->connect(); unless($dbh) { BAIL_OUT("Unable to connect to the database $DBI::errstr\nTests skipped.\n"); exit 0; } my ($ev, $sth); eval { local $dbh->{PrintWarn} = 0; local $dbh->{PrintError} = 0; $dbh->do('drop table PERL_DBD_drop_me'); }; eval { $dbh->do('create table PERL_DBD_drop_me (a integer)'); }; $ev = $@; #2 diag($ev) if $ev; ok(!$ev, 'create test table with integer'); BAIL_OUT("Failed to create test table") if $ev; sub default { eval { $sth = $dbh->prepare('INSERT into PERL_DBD_drop_me VALUES (?)'); }; $ev = $@; diag($ev) if $ev; #3 ok($sth && !$@, "prepare insert"); SKIP: { skip "Failed to prepare", 1 if ($ev); eval { $sth->execute(1); }; $ev = $@; diag($ev) if $ev; #4 ok(!$@, "execute ok"); }; SKIP: { skip "Failed to execute", 1 if ($ev); my $pts = $sth->{ParamTypes}; #5 is(ref($pts), 'HASH', 'ParamTypes is a hash'); my @params = keys %$pts; #6 is(scalar(@params), 1, 'one parameter'); #use Data::Dumper; #diag(Dumper($pts->{$params[0]})); #7 # for drivers which don't have SQLDescribeParam the type will # be defaulted to SQL_VARCHAR or SQL_WVARCHAR ok(($pts->{$params[0]}->{TYPE} == SQL_INTEGER) || ($pts->{$params[0]}->{TYPE} == SQL_LONGVARCHAR) || ($pts->{$params[0]}->{TYPE} == SQL_WLONGVARCHAR) || ($pts->{$params[0]}->{TYPE} == SQL_WVARCHAR) || ($pts->{$params[0]}->{TYPE} == SQL_VARCHAR), 'integer parameter') or diag("Param type: " . 
$pts->{$params[0]}->{TYPE}); }; } sub on_prepare { eval { $sth = $dbh->prepare('INSERT into PERL_DBD_drop_me VALUES (?)', { odbc_describe_parameters => 0}); }; $ev = $@; diag($ev) if $ev; #8 ok($sth && !$@, "prepare insert"); SKIP: { skip "Failed to prepare", 1 if ($ev); eval { $sth->execute(1); }; $ev = $@; diag($ev) if $ev; #9 ok(!$@, "execute ok"); }; SKIP: { skip "Failed to execute", 1 if ($ev); my $pts = $sth->{ParamTypes}; #10 is(ref($pts), 'HASH', 'ParamTypes is a hash'); my @params = keys %$pts; #11 is(scalar(@params), 1, 'one parameter'); #use Data::Dumper; #diag(Dumper($pts->{$params[0]})); #12 ok(($pts->{$params[0]}->{TYPE} == 12) || ($pts->{$params[0]}->{TYPE} == -9), 'char parameter (prepare)') or diag($pts->{$params[0]}->{TYPE}); }; } sub on_connect { $dbh->{odbc_describe_parameters} = 0; eval { $sth = $dbh->prepare('INSERT into PERL_DBD_drop_me VALUES (?)'); }; $ev = $@; diag($ev) if $ev; #8 ok($sth && !$@, "prepare insert"); SKIP: { skip "Failed to prepare", 1 if ($ev); eval { $sth->execute(1); }; $ev = $@; diag($ev) if $ev; #9 ok(!$@, "execute ok"); }; SKIP: { skip "Failed to execute", 1 if ($ev); my $pts = $sth->{ParamTypes}; #10 is(ref($pts), 'HASH', 'ParamTypes is a hash'); my @params = keys %$pts; #11 is(scalar(@params), 1, 'one parameter'); #use Data::Dumper; #diag(Dumper($pts->{$params[0]})); #12 ok(($pts->{$params[0]}->{TYPE} == 12) || ($pts->{$params[0]}->{TYPE} == -9), 'char parameter (connect)'); }; } default(); on_prepare(); on_connect(); DBD-ODBC-1.61/t/02simple.t0000755000175000017500000003500713035730543014203 0ustar martinmartin#!perl -w -I./t use Test::More; use strict; use Config; use DBD::ODBC; $| = 1; my $has_test_nowarnings = 1; eval "require Test::NoWarnings"; $has_test_nowarnings = undef if $@; my $tests = 65; $tests += 1 if $has_test_nowarnings; plan tests => $tests; use_ok('DBI', qw(:sql_types)); use_ok('ODBCTEST'); #use_ok('Data::Dumper'); BEGIN { if (!defined $ENV{DBI_DSN}) { plan skip_all => "DBI_DSN is undefined"; } } END { Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings); } my $dbh = DBI->connect(); unless($dbh) { BAIL_OUT("Unable to connect to the database ($DBI::errstr)\nTests skipped.\n"); exit 0; } my $driver_name; # Output DBMS which is useful when debugging cpan-testers output { diag("\n"); diag("Perl $Config{PERL_REVISION}.$Config{PERL_VERSION}.$Config{PERL_SUBVERSION}\n"); diag("osname=$Config{osname}, osvers=$Config{osvers}, archname=$Config{archname}\n"); diag("Using DBI $DBI::VERSION\n"); diag("Using DBD::ODBC $DBD::ODBC::VERSION\n"); diag("Using DBMS_NAME " . DBI::neat($dbh->get_info(17)) . "\n"); diag("Using DBMS_VER " . DBI::neat($dbh->get_info(18)) . "\n"); $driver_name = DBI::neat($dbh->get_info(6)); diag("Using DRIVER_NAME $driver_name\n"); diag("Using DRIVER_VER " . DBI::neat($dbh->get_info(7)) . "\n"); diag("odbc_has_unicode " . ($dbh->{odbc_has_unicode} || '') . "\n"); } # ReadOnly { # NOTE: the catching of warnings here needs a DBI > 1.628 local $dbh->{AutoCommit} = 0; my $warning; local $SIG{__WARN__} = sub {diag "AA:"; diag @_; $warning = 1}; $dbh->{ReadOnly} = 1; if ($warning) { diag "Your ODBC driver does not support setting ReadOnly"; } is($dbh->{ReadOnly}, 1, 'ReadOnly set'); $dbh->{ReadOnly} = 0; is($dbh->{ReadOnly}, 0, 'ReadOnly cleared'); } # # test private_attribute_info. 
# connection handles and statement handles should return a hash ref of # private attributes # SKIP: { skip "DBI too old for private_attribute_info", 3 if ($DBI::VERSION < 1.54); my $pai = $dbh->private_attribute_info(); #diag Data::Dumper->Dump([$pai], [qw(dbc_private_attribute_info)]); ok(defined($pai), 'dbc private_attribute_info result'); ok(ref($pai) eq 'HASH', 'dbc private_attribute_info is hashref'); ok(scalar(keys %{$pai}) >= 1, 'dbc private_attribute_info has some attributes'); }; SKIP: { skip "DBI too old for private_attribute_info", 3 if ($DBI::VERSION < 1.54); my $sql; my $drv = $dbh->get_info(17); if ($drv =~ /Oracle/i) { $sql = q/select 1 from dual/; } elsif ($drv =~ /Firebird/i) { $sql = q/select 1 from rdb$database/; } else { $sql = q/select 1/; } my $sth = $dbh->prepare($sql); my $pai = $sth->private_attribute_info(); #diag Data::Dumper->Dump([$pai], [qw(stmt_private_attribute_info)]); ok(defined($pai), 'stmt private_attribute_info result'); ok(ref($pai) eq 'HASH', 'stmt private_attribute_info is hashref'); ok(scalar(keys %{$pai}) >= 1, 'stmt private_attribute_info has some attributes'); $sth->finish; }; # # Test changing of AutoCommit - start by setting away from the default # $dbh->{AutoCommit} = 0; pass("Set Auto commit off"); is($dbh->{AutoCommit}, 0, 'Auto commit off retrieved'); $dbh->{AutoCommit} = 1; pass("Set Auto commit on"); is($dbh->{AutoCommit}, 1, "Auto commit on restored"); #### testing a simple select my $rc = 0; ok(ODBCTEST::tab_create($dbh), "create test table"); cmp_ok(ODBCTEST::tab_exists($dbh), '>=', 0, "test table exists"); ok(ODBCTEST::tab_insert($dbh), "insert test data"); ok(tab_select($dbh), "select test data"); $rc = undef; # # LongReadLen # my $lrl = $dbh->{LongReadLen}; ok(defined($lrl), 'Get LongReadLen starting value'); ok(DBI::looks_like_number($lrl), 'LongReadLen is numeric'); $dbh->{LongReadLen} = $lrl + 1; pass('Set LongReadLen'); is($dbh->{LongReadLen}, $lrl + 1, "Read changed LongReadLen back"); # # LongTruncOk # my $lto = $dbh->{LongTruncOk}; ok(defined($lto), 'Get LongTruncOk starting value'); $dbh->{LongTruncOk} = 1; pass('Set LongTruncOk on'); is($dbh->{LongTruncOk}, 1, "LongTruncOk on"); $dbh->{PrintError} = 0; is($dbh->{PrintError}, '', "Set Print Error"); # # check LongTruncOk works i.e. select a column longer than 50 # check truncated data agrees with LongReadLen # $dbh->{LongTruncOk} = 1; $dbh->{LongReadLen} = 50; my $max_col_len; ok(select_long($dbh, \$max_col_len, 1), "Select Long data, LongTruncOk"); ok(!defined($dbh->err), 'err not set on LongTruncOk handle'); # NOTE: there is an existing bug in DBD::ODBC that truncates to LongReadLen # + 1 instead of LongReadLen. Not fixed yet and failing test causes loads # of people to post saying it fails so change to test not more than # LongReadLen + 1. ok($max_col_len <= 51, 'Truncated column to LongReadLen') or diag("Got $max_col_len"); # now force an error and ensure we get a long truncated event. $dbh->{LongTruncOk} = 0; is($dbh->{LongTruncOk}, '', "Set Long TruncOk 0"); # Following test fails with FreeTDS 0.63 and 0.64 because FreeTDS does not # report a data truncation error and hence no error is raised and there # err, errstr and state are not set. 
$rc = select_long($dbh, \$max_col_len, 0); ok(!$rc, "Select Long Data failure"); ok($dbh->err, 'error set on truncated handle'); ok($dbh->errstr, 'errstr set on truncated handle'); ok($dbh->state, 'state set on truncated handle'); if ($rc && ($driver_name =~ /tdsodbc/)) { diag(qq/\nNOTE:\nFreeTDS fails the previous 4 tests because when you select a column greater\nthan 80 characters with LongTruncOk it does not generate a\n01004, "String data, right truncation error\n"/); } my $sth = $dbh->prepare("SELECT * FROM $ODBCTEST::table_name ORDER BY COL_A"); ok(defined($sth), "prepare select from table"); if ($sth) { ok($sth->execute(), "Execute select"); # the following 1 in the first arg is technically ok as ODBC says when the field descriptor # (the 2nd arg) is a header field (e.g. SQL_COLUMN_COUNT) the driver should ignore it. # However, some drivers (postgres) don't. You need to change the 1 to 0 for postgres to work. # BUT it gets worse. Postgres doesn't have SQLColAttributes (a deprecated ODBC 2.0 API) # and so the driver manager maps calls to SQLColAttributes to SQLColAttribute. All would # be well, BUT unixODBC until version 2.3.5 has a bug where, when it maps # SQLColAttributes to SQLColAttribute and you pass 0 for the column, it will error with # invalid descriptor index because bookmarks are not enabled and it did not handle # SQL_COLUMN_COUNT - sigh. # So, the upshot is if I don't change the arg below all drivers I know except postgres # will work and if I do change the arg below all drivers on systems without unixODBC 2.3.5 # (most now) will fail. my $colcount = $sth->func(1, 0, 'ColAttributes'); # 1 for col (unused) 0 for SQL_COLUMN_COUNT #diag("Column count is: $colcount\n"); is($colcount, $sth->{NUM_OF_FIELDS}, 'NUM_OF_FIELDS = ColAttributes(SQL_COLUMN_COUNT)'); my ($coltype, $colname, $i, @row); my $is_ok = 0; for ($i = 1; $i <= $sth->{NUM_OF_FIELDS}; $i++) { # $i is colno (1 based) 2 is for SQL_COLUMN_TYPE, 1 is for SQL_COLUMN_NAME $coltype = $sth->func($i, 2, 'ColAttributes'); # NOTE: changed below to uc (uppercase) as keys in TestFieldInfo are # uppercase and databases are not guaranteed to return column names in # uppercase. $colname = uc($sth->func($i, 1, 'ColAttributes')); #diag("$i: $colname = $coltype ", $coltype+1-1); if (grep { $coltype == $_ } @{$ODBCTEST::TestFieldInfo{$colname}}) { $is_ok++; } else { diag("Coltype $coltype for column $colname not found in list ", join(', ', @{$ODBCTEST::TestFieldInfo{$colname}}), "\n"); } } is($colcount, $is_ok, "Col count matches correct col count"); # print "not " unless $is_ok == $colcount; # print "ok 9\n"; $sth->finish; } else { fail("select didn't work, so column count won't work"); } $dbh->{RaiseError} = 0; is($dbh->{RaiseError}, '', "Set RaiseError 0"); $dbh->{PrintError} = 0; is($dbh->{PrintError}, '', "Set PrintError 0"); # # some ODBC drivers will prepare this OK, but not execute.
# $sth = $dbh->prepare("SELECT XXNOTCOLUMN FROM $ODBCTEST::table_name"); $sth->execute() if $sth; if (!defined($DBI::errstr) || (length($DBI::errstr) == 0)) { fail("Error reported on bad query"); if ($driver_name =~ /tdsodbc/) { diag(qq/NOTE:\nfreeTDS 0.63 at least, fails the previous test because no error is returned\nfrom SQLPrepare or SQLExecute when you enter a\n"select non_existent_table_name from table" query.\nVersion 0.82 seems to have fixed this./); } } else { pass("Error reported on bad query"); } my $row = ODBCTEST::get_type_for_column($dbh, 'COL_D'); my $dateval; if (ODBCTEST::isDateType($row->{DATA_TYPE})) { $dateval = "{d '1998-05-13'}"; } else { $dateval = "{ts '1998-05-13 12:13:01'}"; } $sth = $dbh->prepare("SELECT COL_D FROM $ODBCTEST::table_name WHERE COL_D > $dateval"); ok(defined($sth), "date check select"); ok($sth->execute(), "date check execute"); my $count = 0; while (my @row = $sth->fetchrow) { $count++ if ($row[0]); # diag("$row[0]\n"); } is($count, 1, "date check rows"); $sth = $dbh->prepare("SELECT COL_A, COUNT(*) FROM $ODBCTEST::table_name GROUP BY COL_A"); ok($sth, "group by query prepare"); ok($sth->execute(), "group by query execute"); $count = 0; while (my @row = $sth->fetchrow) { $count++ if ($row[0]); # diag("$row[0], $row[1]\n"); } cmp_ok($count, '!=', 0, "group by query returned rows"); $rc = ODBCTEST::tab_delete($dbh); # Note, this test will fail if no data sources defined or if # data_sources is unsupported. my @data_sources = DBI->data_sources('ODBC'); #diag("Data sources:\n\t", join("\n\t",@data_sources),"\n\n"); # NOTE: data_sources may return 0 data sources if there are none # and we are using a DSN-less connection string cmp_ok($#data_sources, '>=', -1, "data sources test"); ok($dbh->ping, "test ping method"); is($dbh->{odbc_ignore_named_placeholders}, 0, "Attrib odbc_ignore_named_placeholders 0 to start"); $dbh->{odbc_ignore_named_placeholders} = 1; is($dbh->{odbc_ignore_named_placeholders}, 1, "Attrib odbc_ignore_named_placeholders set to 1"); my $dbh2 = DBI->connect(); ok(defined($dbh2), "test connecting twice to the same database"); $dbh2->disconnect; my $dbname; $dbname = $dbh->get_info(17); # SQL_DBMS_NAME # diag(" connected to $dbname\n"); ok(defined($dbname) && $dbname ne '', "database name is returned successfully"); #print "\nnot " unless (defined($dbname) && $dbname ne ''); #print "ok 17\n"; $sth = $dbh->prepare("select count(*) from $ODBCTEST::table_name"); $sth->execute; $sth->fetch; ok($sth->execute, "automatically finish when execute run again"); #DBI->trace(9, "c:/trace.txt"); # TBD: Make skip block! my $connstr = $ENV{DBI_DSN}; SKIP: { skip "DSN already contains DRIVER= or DSN=", 3 unless (!($connstr =~ /DSN=/i || $connstr =~ /DRIVER=/i)); $connstr =~ s/ODBC:/ODBC:DSN=/; my $dbh3 = DBI->connect($ENV{DBI_DSN} . "x", $ENV{DBI_USER}, $ENV{DBI_PASS}, {RaiseError=>0, PrintError=>0}); ok(defined($DBI::errstr), "INVALID DSN Test: " . $DBI::errstr . 
"\n"); $dbh3->disconnect if (defined($dbh3)); $dbh3 = DBI->connect($connstr, $ENV{DBI_USER}, $ENV{DBI_PASS}, {RaiseError=>0, PrintError=>0}); ok(defined($dbh3), "Connection with DSN=$connstr"); $dbh3->disconnect if (defined($dbh3)); my $cs = $connstr; $cs .= ";UID=$ENV{DBI_USER}" if exists($ENV{DBI_USER}); $cs .= ";PWD=$ENV{DBI_PASS}" if exists($ENV{DBI_PASS}); $dbh3 = DBI->connect($cs,undef,undef, {RaiseError=>0, PrintError=>0}); ok(defined($dbh3), "Connection with DSN=$connstr and UID and PWD are set") or diag($cs); $dbh3->disconnect if (defined($dbh3)); }; # Test(1); # clean up $sth->finish; exit(0); sub tab_select { my $dbh = shift; my @row; my $rowcount = 0; $dbh->{LongReadLen} = 1000; my $sth = $dbh->prepare("SELECT COL_A, COL_B, COL_C, COL_D FROM $ODBCTEST::table_name ORDER BY COL_A") or return undef; $sth->execute(); ok($sth->{NUM_OF_FIELDS} == 4, 'NUM_OF_FIELDS'); my $columns = $sth->{NAME_uc}; #diag Data::Dumper->Dump([$columns], [qw(column_names)]); is(scalar(@$columns), 4, 'NAME returns right number of columns'); is($columns->[0], 'COL_A', 'column name for column 1'); is($columns->[1], 'COL_B', 'column name for column 2'); is($columns->[2], 'COL_C', 'column name for column 3'); is($columns->[3], 'COL_D', 'column name for column 4'); while (@row = $sth->fetchrow()) { # print "$row[0]|$row[1]|$row[2]|\n"; ++$rowcount; if ($rowcount != $row[0]) { # print "Basic retrieval of rows not working!\nRowcount = $rowcount, while retrieved value = $row[0]\n"; $sth->finish; return 0; } } $sth->finish(); $sth = $dbh->prepare("SELECT COL_A,COL_C FROM $ODBCTEST::table_name WHERE COL_A>=4") or return undef; $sth->execute(); while (@row = $sth->fetchrow()) { if ($row[0] == 4) { if (!is($row[1], $ODBCTEST::longstr, "long strings compare")) { diag("Basic retrieval of longer rows not working\n" . DBI::data_diff($row[1], $ODBCTEST::longstr)); return 0; } } elsif ($row[0] == 5) { if (!is($row[1], $ODBCTEST::longstr2, "long strings compare 255")) { diag("Basic retrieval of row longer than 255 chars" . " not working!\n" . DBI::data_diff($row[1], $ODBCTEST::longstr2)); return 0; } } } return 1; } # # returns 1 unless the eval around the select fails (e.g. if truncation) # sub select_long { my ($dbh, $max_col, $expect) = @_; $$max_col = undef; my @row; my $sth; my $rc = 0; my $longest = undef; local $dbh->{RaiseError} = 1; local $dbh->{PrintError} = 0; local $dbh->{PrintWarn} = 0; $sth = $dbh->prepare("SELECT COL_A,COL_C FROM $ODBCTEST::table_name WHERE COL_A=4"); if ($sth) { $sth->execute(); eval { while (@row = $sth->fetchrow()) { foreach my $c (@row) { if (!$longest) { $longest = length($c); } else { $longest = length($c) if length($c) > $longest; } } } }; $rc = 1 unless ($@) ; } if ($rc != $expect) { diag("Row " . (map {(defined($_) ? $_ : 'undef') . ','} @row) . "\n"); diag("expect=$expect, Longest: " . DBI::neat($longest) . "\n"); } $$max_col = $longest; $rc; } __END__ DBD-ODBC-1.61/t/rt_39897.t0000755000175000017500000000466712254015247013767 0ustar martinmartin#!/usr/bin/perl -w -I./t # # test for rt 39897. DBD::ODBC 1.17 was accidentally changed to apply # LongReadLen to SQL_VARCHAR columns. 1.16 and earlier only use LongTruncOk # and LongReadLen on long columns e.g. SQL_LONGVARCHAR. As a result, if you # had a table with a varchar(N) where N > 80 (80 being the default for # LongReadLen) and moved from 1.16 to 1.17 then you'd suddenly get data # truncated errors for rows where the SQL_VARCHAR was > 80 chrs. 
# use Test::More; use strict; $| = 1; my $has_test_nowarnings = 1; eval "require Test::NoWarnings"; $has_test_nowarnings = undef if $@; my $tests = 6; $tests += 1 if $has_test_nowarnings; plan tests => $tests; # can't seem to get the imports right this way use DBI qw(:sql_types); #1 use_ok('ODBCTEST'); my $dbh; BEGIN { if (!defined $ENV{DBI_DSN}) { plan skip_all => "DBI_DSN is undefined"; } } END { if ($dbh) { eval { local $dbh->{PrintWarn} = 0; local $dbh->{PrintError} = 0; $dbh->do(q/drop table PERL_DBD_rt_39897/); }; $dbh->disconnect; } Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings); } $dbh = DBI->connect(); unless($dbh) { BAIL_OUT("Unable to connect to the database $DBI::errstr\nTests skipped.\n"); exit 0; } my ($ev, $sth); eval { local $dbh->{PrintWarn} = 0; local $dbh->{PrintError} = 0; $dbh->do('drop table PERL_DBD_rt_39897'); }; eval { $dbh->do('create table PERL_DBD_rt_39897 (a VARCHAR(100))'); }; $ev = $@; #2 diag($ev) if $ev; ok(!$ev, 'create test table with varchar'); SKIP: { skip "Failed to create test table", 1 if ($ev); eval { $sth = $dbh->prepare('INSERT into PERL_DBD_rt_39897 VALUES (?)'); }; $ev = $@; diag($ev) if $ev; #3 ok($sth && !$@, "prepare insert"); }; SKIP: { skip "Failed to prepare", 1 if ($ev); eval {$sth->execute('x' x 100)}; $ev = $@; diag($ev) if $ev; #4 ok(!$ev, "execute insert"); }; SKIP: { skip "Failed to execute", 2 if ($ev); eval { $sth = $dbh->prepare(q/select a from PERL_DBD_rt_39897/); $sth->execute; }; $ev = $@; diag($ev) if $ev; ok(!$ev, 'issue select on test table'); }; SKIP: { my @row; eval { local $sth->{RaiseError} = 1; local $sth->{PrintError} = 0; @row = $sth->fetchrow_array; }; $ev = $@; diag($ev) if $ev; ok(!$ev, 'fetch varchar(100) from test table'); }; DBD-ODBC-1.61/t/09multi.t0000755000175000017500000001141512254015017014042 0ustar martinmartin#!/usr/bin/perl -I./t -w use Test::More; eval "require Test::NoWarnings"; $| = 1; my $has_test_nowarnings = 1; eval "require Test::NoWarnings"; $has_test_nowarnings = undef if $@; my $tests = 7; $tests += 1 if $has_test_nowarnings; plan tests => $tests; use_ok('strict'); use_ok('DBI'); use_ok('ODBCTEST'); BEGIN { if (!defined $ENV{DBI_DSN}) { plan skip_all => "DBI_DSN is undefined"; } } END { Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings); } # $ENV{'ODBCINI'}="/export/cmn/etc/odbc.ini" ; #my($connectString) = "dbi:ODBC:DSN=TESTDB;Database=xxxxx;uid=usrxxxxx;pwd=xxxxx" ; my $dbh=DBI->connect(); unless($dbh) { BAIL_OUT("Unable to connect to the database $DBI::errstr\nTests skipped.\n"); exit 0; } $dbh->{RaiseError} = 1; $dbh->{PrintError} = 0; $dbh->{LongReadLen} = 10000; SKIP: { skip "Multiple statements not supported using " . $dbh->get_info(17) . " (SQL_MULT_RESULT_SETS)", 4 unless ($dbh->get_info(36) eq "Y"); my($sqlStr) ; my @test_colnames = sort(keys(%ODBCTEST::TestFieldInfo)); $sqlStr = "select $test_colnames[0] FROM $ODBCTEST::table_name; select $test_colnames[0] from $ODBCTEST::table_name" ; #$sqlStr = "select emp_id from employee where emp_id = 2 # select emp_id, emp_name, address1, address2 from employee where emp_id = 2" ; my $result_sets = 0; my $sth; eval { $sth = $dbh->prepare($sqlStr); $sth->execute; }; if ($@) { skip("Multiple statements not supported using " . $dbh->get_info(17) . 
"\n", 4); } my @row; my $cnt = 0; $result_sets = 0; do { # print join(":", @{$sth->{NAME}}), "\n"; while ( my $ref = $sth->fetch ) { # print join(":", @$ref), "\n"; } $result_sets++; } while ( $sth->{odbc_more_results} ) ; is($result_sets, 2, "count number of result sets"); my $sql; my @expected_result_cols; # lets get some dummy data for testing. ODBCTEST::tab_insert($dbh); $sql = "select $test_colnames[0] from $ODBCTEST::table_name order by $test_colnames[0]; select $test_colnames[0],$test_colnames[1] from $ODBCTEST::table_name order by $test_colnames[0]"; @expected_result_cols = (1, 2); ok(RunMultiTest($sql, \@expected_result_cols), "Multiple result sets with different column counts (less then more)"); $sql = "select $test_colnames[0],$test_colnames[1] from $ODBCTEST::table_name order by $test_colnames[0]; select $test_colnames[0] from $ODBCTEST::table_name order by $test_colnames[0]"; @expected_result_cols = (2, 1); ok(RunMultiTest($sql, \@expected_result_cols), "Multiple result sets with different column counts (more then less)"); $sql = "select " . join(", ", grep {/COL_[ABC]/} @test_colnames) . " from $ODBCTEST::table_name order by $test_colnames[0]; select $test_colnames[0] from $ODBCTEST::table_name order by $test_colnames[0]"; @expected_result_cols = ($#test_colnames, 1); ok(RunMultiTest($sql, \@expected_result_cols), "Multiple result sets with multiple cols, then second result set with one col"); # clean up the dummy data. ODBCTEST::tab_delete($dbh); }; $dbh->disconnect(); sub RunMultiTest { my $sql = shift; my $ref_expected_result_cols = shift; my @expected_result_cols = @$ref_expected_result_cols; my $test_pass = 1; my $result_sets = 0; $sth = $dbh->prepare($sql); $sth->execute; do { # $#expected_result_cols is the array of number of result cols # and the count/array size represents the number of result sets... if ($result_sets > $#expected_result_cols) { print "Number of result sets not correct in test $result_sets is more than the expected $#expected_result_cols.\n"; $test_pass = 0; } else { if ($sth->{NUM_OF_FIELDS} != $expected_result_cols[$result_sets]) { print "Num of fields not correct in result set $result_sets. Expected $expected_result_cols[$result_sets], found $sth->{NUM_OF_FIELDS}\n"; $test_pass = 0; } } # print join(", ", @{$sth->{NAME}}), "\n"; my $i = 0; while ( my $ref = $sth->fetchrow_arrayref ) { # if ($] > 5.005) { # no warnings; # print join(":", @$ref), "\n"; #} my $row = $ODBCTEST::tab_insert_values[$i]; my $j; for ($j = 0; $j < $sth->{NUM_OF_FIELDS}; $j++) { if ($row->[$j] ne $ref->[$j]) { print "Data mismatch, result set $result_sets, row $i, col $j ($row->[$j] != $ref->[$j])\n"; $test_pass = 0; } } $i++; } $result_sets++; } while ( $sth->{odbc_more_results} ) ; if ($result_sets <= $#expected_result_cols) { print "Number of result sets not correct in test (fewer than expected)\n"; $test_pass = 0; } $test_pass; } exit(0); print $DBI::errstr; print $ODBCTEST::tab_insert_values[0]; print sort(keys(%ODBCTEST::TestFieldInfo)); DBD-ODBC-1.61/t/rt_79190.t0000755000175000017500000000171312254015313013734 0ustar martinmartin#!/usr/bin/perl -w -I./t # # rt 79190 # # If you use a connection string like 'dbi:ODBC:DSN=xxx' DBD::ODBC # should append the username and password to it from the other args to # connect as UID=xxx;PWD=yyy # use Test::More; use strict; use DBI; use_ok('ODBCTEST'); eval "require Test::NoWarnings"; my $has_test_nowarnings = ($@ ? 
undef : 1); my $dbh; BEGIN { if (!defined $ENV{DBI_DSN}) { plan skip_all => "DBI_DSN is undefined"; } if (!defined $ENV{DBI_USER}) { plan skip_all => "DBI_USER is undefined"; } if (!defined $ENV{DBI_PASS}) { plan skip_all => "DBI_PASS is undefined"; } } END { Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings); done_testing(); } my $dsn = $ENV{DBI_DSN}; if ($dsn !~ /dbi:ODBC:DSN=/i && $dsn !~ /dbi:ODBC:DRIVER=/i) { $dsn =~ s/dbi:ODBC:(.*)/dbi:ODBC:DSN=$1/; } $dbh = DBI->connect($dsn, $ENV{DBI_USER}, $ENV{DBI_PASS}); ok($dbh, "User/pass appended to DSN"); DBD-ODBC-1.61/t/90_trace_flags.t0000755000175000017500000000145712254015172015330 0ustar martinmartin#!/usr/bin/perl -w -I./t use 5.006; use strict; use warnings; use Test::More; use DBI; my $has_test_nowarnings = 1; eval "require Test::NoWarnings"; $has_test_nowarnings = undef if $@; BEGIN { if (!defined $ENV{DBI_DSN}) { plan skip_all => "DBI_DSN is undefined"; } } my $h = DBI->connect(); unless($h) { BAIL_OUT("Unable to connect to the database ($DBI::errstr)\nTests skipped.\n"); exit 0; } my $bit; $bit = $h->parse_trace_flag('odbcunicode'); is($bit, 0x02_00_00_00, 'odbcunicode'); $bit = $h->parse_trace_flag('odbcconnection'); is($bit, 0x04_00_00_00, 'odbcconnection'); my $val; $val = $h->parse_trace_flags('odbcunicode|odbcconnection'); is($val, 0x06_00_00_00, "parse_trace_flags"); $h->disconnect; Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings); done_testing; DBD-ODBC-1.61/t/20SqlServer.t0000755000175000017500000005705512277107326014653 0ustar martinmartin#!/usr/bin/perl -w -I./t use Test::More; use strict; $| = 1; my $has_test_nowarnings = 1; eval "require Test::NoWarnings"; $has_test_nowarnings = undef if $@; my $tests = 67; $tests += 1 if $has_test_nowarnings; plan tests => $tests; my $dbh; use DBI qw(:sql_types); use DBI::Const::GetInfoType; use_ok('ODBCTEST'); BEGIN { plan skip_all => "DBI_DSN is undefined" if (!defined $ENV{DBI_DSN}); } END { # tidy up if ($dbh) { local $dbh->{PrintError} = 0; local $dbh->{PrintWarn} = 0; eval { $dbh->do(q/drop procedure PERL_DBD_PROC1/); $dbh->do(q/drop procedure PERL_DBD_PROC2/); $dbh->do(q/drop table PERL_DBD_TABLE1/); }; } Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings); } my $dbms_name; my $dbms_version; my $m_dbmsversion; my $driver_name; sub getinfo { my $dbh = shift; $dbms_name = $dbh->get_info($GetInfoType{SQL_DBMS_NAME}); ok($dbms_name, "got DBMS name: $dbms_name"); $dbms_version = $dbh->get_info($GetInfoType{SQL_DBMS_VER}); ok($dbms_version, "got DBMS version: $dbms_version"); $m_dbmsversion = $dbms_version; $m_dbmsversion =~ s/^(\d+).*/$1/; ok($m_dbmsversion, "got DBMS major version: $m_dbmsversion"); $driver_name = $dbh->get_info($GetInfoType{SQL_DRIVER_NAME}); ok($driver_name, "got Driver Name: $driver_name"); } sub varmax_test { my ($dbh, $coltype) = @_; SKIP: { skip "SQL Server major version $m_dbmsversion too old", 4 if $m_dbmsversion < 9; my $data = 'x' x 1000; my $datalen = length($data); local $dbh->{PrintError} = 0; local $dbh->{RaiseError} = 1; local $dbh->{LongReadLen} = length($data) * 2; eval {$dbh->do(q/drop table PERL_DBD_TABLE1/);}; eval { $dbh->do(qq/create table PERL_DBD_TABLE1 (a int identity, b $coltype(MAX))/); # workaround freeTDS problem: if ($driver_name =~ /tdsodbc/) { $dbh->do( qq/insert into PERL_DBD_TABLE1 (b) values(CAST(? 
AS $coltype(MAX)))/, undef, $data); } else { $dbh->do(q/insert into PERL_DBD_TABLE1 (b) values(?)/, undef, $data); } }; diag($@) if $@; ok(!$@, "create PERL_DBD_TABLE1 and insert test data"); SKIP: { skip "failed to create test table or insert data", 3 if $@; my $sth = $dbh->prepare(q/select a,b from PERL_DBD_TABLE1/); $sth->execute; my ($a, $b); eval { ($a, $b) = $sth->fetchrow_array; }; diag($@) if $@; ok(!$@, "fetchrow for $coltype(max)"); SKIP: { skip "fetchrow failed", 2 if $@; ok($b, "data received from $coltype(max)"); is(length($b), $datalen, 'all data (' . length($b) . ") received from $coltype(max)"); }; }; }; eval { local $dbh->{PrintError} = 0; local $dbh->{RaiseError} = 0; $dbh->do(q/drop table PERL_DBD_TABLE1/); }; } sub _do_proc { my ($dbh, $proc) = @_; my $sth; eval {$sth = $dbh->prepare($proc, {odbc_exec_direct => 1})}; my $ev = $@; diag($ev) if $ev; ok(!$ev, "prepare for $proc"); SKIP: { skip "prepare for $proc failed", 3 if $ev; SKIP: { eval {$sth->execute}; $ev = $@; diag($ev) if $ev; ok(!$ev, "execute for $proc"); SKIP: { skip "execute for $proc failed", 2 if $ev; my $fields; eval {$fields = $sth->{NUM_OF_FIELDS}}; $ev = $@; diag($ev) if $ev; ok(!$ev, "NUM_OF_FIELDS for $proc"); like($fields, qr|^\d+$|, "numeric fields"); }; $sth->finish; }; }; } sub procs_with_no_results { my $dbh = shift; local $dbh->{PrintError} = 0; eval {$dbh->do(q/drop procedure PERL_DBD_PROC1/)}; eval {$dbh->do(q/drop procedure PERL_DBD_PROC2/)}; my $proc1 = <do($proc1)}; my $ev = $@; diag($ev) if $ev; ok(!$ev, 'create perl_dbd_proc1 procedure'); SKIP: { skip 'failed to create perl_dbd_proc1 procedure', 9 if $ev; SKIP: { eval {$dbh->do($proc2)}; $ev = $@; diag($ev) if $ev; ok(!$ev, 'create perl_dbd_proc2 procedure'); SKIP: { skip 'failed to create perl_dbd_proc2 procedure', 8 if $ev; _do_proc($dbh, 'PERL_DBD_PROC1'); _do_proc($dbh, 'PERL_DBD_PROC2'); }; }; }; } sub Multiple_concurrent_stmts { my ($dbh, $expect) = @_; my $sth = $dbh->prepare("select * from PERL_DBD_TABLE1"); $dbh->{RaiseError} = 1; $sth->execute; my @row; eval { while (@row = $sth->fetchrow_array()) { my $sth2 = $dbh->prepare("select * from $ODBCTEST::table_name"); $sth2->execute; my @row2; while (@row2 = $sth2->fetchrow_array()) { } } }; if ($@) { diag($@) if (defined($expect) && ($expect == 1)); return 0; } diag("Expected fail of MARS and it worked!") if (defined($expect) && ($expect == 0)); return 1; } $dbh = DBI->connect(); unless($dbh) { BAIL_OUT("Unable to connect to the database $DBI::errstr\nTests skipped.\n"); exit 0; } my $sth; my $dbname = $dbh->get_info($GetInfoType{SQL_DBMS_NAME}); SKIP: { skip "Microsoft SQL Server tests not supported using $dbname", 66 unless ($dbname =~ /Microsoft SQL Server/i); getinfo($dbh); varmax_test($dbh, 'varchar'); varmax_test($dbh, 'varbinary'); varmax_test($dbh, 'nvarchar'); procs_with_no_results($dbh); # the times chosen below are VERY specific to NOT cause rounding errors, # but may cause different errors on different versions of SQL Server. 
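    # (Sketch note, not part of the original test: SQL Server's DATETIME type
    # is only accurate to roughly 1/300 of a second, so fractional seconds are
    # rounded to .000/.003/.007 boundaries on insert. A hypothetical value
    # such as '2004-04-04 04:04:04.446' would come back as '...04.447', which
    # is why every fraction used below already sits on a representable
    # boundary.)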
my @data = ( [undef, "z" x 13 ], ["2001-01-01 01:01:01.110", "a" x 12], # "aaaaaaaaaaaa" ["2002-02-02 02:02:02.123", "b" x 114], ["2003-03-03 03:03:03.333", "c" x 251], ["2004-04-04 04:04:04.443", "d" x 282], ["2005-05-05 05:05:05.557", "e" x 131] ); eval { local $dbh->{PrintError} = 0; $dbh->do("DROP TABLE PERL_DBD_TABLE1"); }; $dbh->{RaiseError} = 1; $dbh->{LongReadLen} = 800; my @types = (SQL_TYPE_TIMESTAMP, SQL_TIMESTAMP); my $row = $dbh->type_info(\@types); BAIL_OUT("Unable to find a suitable test type for date field") if !$row; my $datetype = $row->{TYPE_NAME}; $dbh->do("CREATE TABLE PERL_DBD_TABLE1 (i INTEGER, time $datetype, str VARCHAR(4000))"); # Insert records into the database: my $sth1 = $dbh->prepare("INSERT INTO PERL_DBD_TABLE1 (i,time,str) values (?,?,?)"); for (my $i=0; $i<@data; $i++) { my ($time,$str) = @{$data[$i]}; # print "Inserting: $i, "; # print $time if (defined($time)); # print " string length " . length($str) . "\n"; $sth1->bind_param (1, $i, SQL_INTEGER); $sth1->bind_param (2, $time, SQL_TIMESTAMP); $sth1->bind_param (3, $str, SQL_LONGVARCHAR); $sth1->execute or die ($DBI::errstr); } # Retrieve records from the database, and see if they match original data: my $sth2 = $dbh->prepare("SELECT i,time,str FROM PERL_DBD_TABLE1"); $sth2->execute or die ($DBI::errstr); my $iErrCount = 0; while (my ($i,$time,$str) = $sth2->fetchrow_array()) { if (defined($time)) { $time =~ s/0000$//o; } if ((defined($time) && $time ne $data[$i][0]) || defined($time) != defined($data[$i][0])) { diag("Retrieving: $i, $time string length: " . length($str) . "\t!time "); $iErrCount++; } if ($str ne $data[$i][1]) { diag("Retrieving: $i, $time string length: " . length($str) . "\t!string "); $iErrCount++; } # print "\n"; } is($iErrCount, 0, "errors on data comparison"); eval { local $dbh->{RaiseError} = 0; $dbh->do("DROP TABLE PERL_DBD_TABLE1"); }; my $sql = 'CREATE TABLE #PERL_DBD_TABLE1 (id INT PRIMARY KEY, val VARCHAR(4))'; $dbh->do($sql); # doesn't work with prepare, etc...hmmm why not? # $sth = $dbh->prepare($sql); # $sth->execute; # $sth->finish; # See http://technet.microsoft.com/en-US/library/ms131667.aspx # which says # "Prepared statements cannot be used to create temporary objects on SQL # Server 2000 or later..." # $sth = $dbh->prepare("INSERT INTO #PERL_DBD_TABLE1 (id, val) VALUES (?, ?)"); $sth2 = $dbh->prepare("INSERT INTO #PERL_DBD_TABLE1 (id, val) VALUES (?, ?)"); my @data2 = (undef, 'foo', 'bar', 'blet', undef); my $i = 0; my $val; foreach $val (@data2) { $sth2->execute($i++, $val); } $i = 0; $sth = $dbh->prepare("Select id, val from #PERL_DBD_TABLE1"); $sth->execute; $iErrCount = 0; while (my @row = $sth->fetchrow_array) { unless ((!defined($row[1]) && !defined($data2[$i])) || ($row[1] eq $data2[$i])) { $iErrCount++ ; print "$row[1] ne $data2[$i]\n"; } $i++; } is($iErrCount, 0, "temporary table handling"); diag("Please upgrade your ODBC drivers to the latest SQL Server drivers available. For example, 2000.80.194.00 is known to be problematic. Use MDAC 2.7, if possible\n") if ($iErrCount != 0); $dbh->{PrintError} = 0; eval {$dbh->do("DROP TABLE PERL_DBD_TABLE1");}; eval {$dbh->do("CREATE TABLE PERL_DBD_TABLE1 (i INTEGER)");}; eval {$dbh->do("DROP PROCEDURE PERL_DBD_PROC1");}; eval {$dbh->do("CREATE PROCEDURE PERL_DBD_PROC1 \@inputval int AS ". "INSERT INTO PERL_DBD_TABLE1 VALUES (\@inputval); " . " return \@inputval;");}; $sth1 = $dbh->prepare ("{? = call PERL_DBD_PROC1(?) 
}"); my $output = undef; $i = 1; $iErrCount = 0; while ($i < 4) { $sth1->bind_param_inout(1, \$output, 50, DBI::SQL_INTEGER); $sth1->bind_param(2, $i, DBI::SQL_INTEGER); $sth1->execute(); # print "$output"; if (!defined($output) || ($output !~ /\d+/) || ($output != $i)) { $iErrCount++; diag("output='$output' error, expected $i\n"); } # print "\n"; $i++; } is($iErrCount, 0, "bind param in out with insert result set"); $iErrCount = 0; eval {$dbh->do("DROP PROCEDURE PERL_DBD_PROC1");}; my $proc1 = "CREATE PROCEDURE PERL_DBD_PROC1 (\@i int, \@result int OUTPUT) AS ". "BEGIN ". " SET \@result = \@i+1;". "END "; # print "$proc1\n"; $dbh->do($proc1); # $dbh->{PrintError} = 1; $sth1 = $dbh->prepare ("{call PERL_DBD_PROC1(?, ?)}"); $i = 12; $output = undef; $sth1->bind_param(1, $i, DBI::SQL_INTEGER); $sth1->bind_param_inout(2, \$output, 100, DBI::SQL_INTEGER); $sth1->execute; is($i, $output-1, "test output params accurate"); $iErrCount = 0; $sth = $dbh->prepare("select * from PERL_DBD_TABLE1 order by i"); $sth->execute; $i = 1; while (my @row = $sth->fetchrow_array) { if ($i != $row[0]) { diag(join(', ', @row), " ERROR!\n"); $iErrCount++; } $i++; } is($iErrCount, 0, "verify select data"); eval {$dbh->do("DROP TABLE PERL_DBD_TABLE1");}; eval {$dbh->do("CREATE TABLE PERL_DBD_TABLE1 (d DATETIME)");}; $sth = $dbh->prepare ("INSERT INTO PERL_DBD_TABLE1 (d) VALUES (?)"); $sth->bind_param (1, undef, SQL_TYPE_TIMESTAMP); $sth->execute(); $sth->bind_param (1, "2002-07-12 05:08:37.350", SQL_TYPE_TIMESTAMP); $sth->execute(); $sth->bind_param (1, undef, SQL_TYPE_TIMESTAMP); $sth->execute(); $iErrCount = 0; $sth2 = $dbh->prepare("select * from PERL_DBD_TABLE1 where d is not null"); $sth2->execute; while (my @row = $sth2->fetchrow_array) { if ($row[0] ne "2002-07-12 05:08:37.350") { $iErrCount++ ; diag(join(", ", @row), "\n"); } } is($iErrCount, 0, "timestamp handling"); eval {$dbh->do('DROP TABLE PERL_DBD_TABLE1');}; eval {$dbh->do('DROP PROCEDURE PERL_DBD_PROC1');}; eval {$dbh->do('CREATE TABLE PERL_DBD_TABLE1 (i INTEGER, j integer)')} or diag($@); $proc1 = <{RaiseError} = 0; # NOTE: MS SQL native client for linux fails this test because # SQLExecute returns SQL_NO_DATA even though the proc never did # a searched update/delete - AND it works on the same Windows driver. eval {$dbh->do($proc1)} or diag($@); my $sth = $dbh->prepare ('{call PERL_DBD_PROC1 (?)}'); my $success = -1; $sth->bind_param (1, 99, SQL_INTEGER); my $cres = $sth->execute(); ok(defined($cres), "execute for non searched update via procedure"); if (!defined($cres)) { note("Your driver has a bug which means it is probably incorrectly returning SQL_NO_DATA from a non-searched update"); } SKIP: { skip "execute failed - probably SQL_NO_DATA bug", 4 if !defined($cres); ok($cres eq '0E0' || $cres == -1, "0/unknown rows updated"); $success = -1; while (my @data = $sth->fetchrow_array()) {($success) = @data;} is($success, 100, 'procedure outputs results as result set'); $sth->bind_param (1, 10, SQL_INTEGER); $sth->execute(); $success = -1; while (my @data = $sth->fetchrow_array()) {($success) = @data;} is($success,10, 'procedure outputs results as result set2'); $sth->bind_param (1, 111, SQL_INTEGER); $sth->execute(); $success = -1; do { my @data; while (@data = $sth->fetchrow_array()) { if ($#data == 0) { ($success) = @data; } } } while ($sth->{odbc_more_results}); is($success, 111, 'procedure outputs results as result set 3'); }; # # special tests for even stranger cases... 
# eval {$dbh->do("DROP PROCEDURE PERL_DBD_PROC1");}; $proc1 = < 100) BEGIN INSERT INTO PERL_DBD_TABLE1 (i, j) VALUES (\@i, \@i); SELECT i, j from PERL_DBD_TABLE1; END; SELECT \@result; END EOT eval {$dbh->do($proc1);}; # set the required attribute and check it. $dbh->{odbc_force_rebind} = 1; is($dbh->{odbc_force_rebind}, 1, "setting force_rebind"); $dbh->{odbc_force_rebind} = 0; is($dbh->{odbc_force_rebind}, 0, "resetting force_rebind"); $sth = $dbh->prepare ("{call PERL_DBD_PROC1 (?)}"); is($sth->{odbc_force_rebind}, 0, "testing force rebind after procedure call"); $success = -1; $sth->bind_param (1, 99, SQL_INTEGER); $cres = $sth->execute(); ok(defined($cres), "execute for non searched update via procedure, force_rebind"); if (!defined($cres)) { note("Your driver has a bug which means it is probably incorrectly returning SQL_NO_DATA from a non-searched update"); } SKIP: { skip "execute failed - probably SQL_NO_DATA bug", 3 if !defined($cres); $success = -1; while (my @data = $sth->fetchrow_array()) {($success) = @data;} is($success, 100, "force rebind test part 2"); $sth->bind_param (1, 10, SQL_INTEGER); $sth->execute(); $success = -1; while (my @data = $sth->fetchrow_array()) {($success) = @data;} is($success, 10, "force rebind test part 3"); $sth->bind_param (1, 111, SQL_INTEGER); $sth->execute(); $success = -1; do { my @data; while (@data = $sth->fetchrow_array()) { if ($#data == 0) { ($success) = @data; } else { # diag("Data: ", join(',', @data), "\n"); } } } while ($sth->{odbc_more_results}); is($success, 111, "force rebind test part 4"); # ensure the attribute is automatically set. # the multiple result sets will trigger this. is($sth->{odbc_force_rebind}, 1, "forced rebind final"); } # # more special tests # make sure output params are being set properly when # multiple result sets are available. Also, ensure fetchrow_hashref # works with multiple statements. # eval {$dbh->do("DROP PROCEDURE PERL_DBD_PROC1");}; $dbh->do("CREATE PROCEDURE PERL_DBD_PROC1 \@parameter1 int = 22 AS /* SET NOCOUNT ON */ select 1 as some_data select isnull(\@parameter1, 0) as parameter1, 3 as some_more_data RETURN(\@parameter1 + 1)"); my $queryInputParameter1 = 2222; my $queryOutputParameter = 0; $sth = $dbh->prepare('{? = call PERL_DBD_PROC1(?) }'); $sth->bind_param_inout(1, \$queryOutputParameter, 30, { TYPE => DBI::SQL_INTEGER }); $sth->bind_param(2, $queryInputParameter1, { TYPE => DBI::SQL_INTEGER }); $sth->execute(); do { for(my $rowRef; $rowRef = $sth->fetchrow_hashref('NAME'); ) { my %outputData = %$rowRef; if (defined($outputData{some_data})) { is($outputData{some_data},1,"Select data available"); ok(!defined($outputData{parameter1}), "output param not yet available"); ok(!defined($outputData{some_more_data}), "output param not yet available2"); } else { is($outputData{parameter1},2222, "Output param data available"); is($outputData{some_more_data},3, "Output param data available 2"); ok(!defined($outputData{some_data}), "select data done"); } # diag('outputData ', Dumper(\%outputData), "\n"); } # print "out of for loop\n"; } while($sth->{odbc_more_results}); # print "out of while loop\n"; is($queryOutputParameter, $queryInputParameter1 + 1, "valid output data"); # test a procedure with no parameters eval {$dbh->do("DROP PROCEDURE PERL_DBD_PROC1");}; eval {$dbh->do("CREATE PROCEDURE PERL_DBD_PROC1 AS return 1;");}; $sth1 = $dbh->prepare ("{ ? 
= call PERL_DBD_PROC1 }"); $output = undef; $iErrCount = 0; $sth1->bind_param_inout(1, \$output, 50, DBI::SQL_INTEGER); $sth1->execute(); is($output, 1, "test procedure with no input params"); $sth1 = undef; # could still be active with some drivers $dbh->{odbc_async_exec} = 1; # print "odbc_async_exec is: $dbh->{odbc_async_exec}\n"; is($dbh->{odbc_async_exec}, 1, "test odbc_async_exec attribute set"); # not sure if this should be a test. May have permissions problems, but # it's the only sample of the error handler stuff I have. my $testpass = 0; my $lastmsg; sub err_handler { my ($state, $msg, $nativeerr) = @_; # Strip out all of the driver ID stuff # normally something like [SQL Server Native Client 10.0][SQL Server] $msg =~ s/^(\[[\w\s:\.]*\])+//; $lastmsg = $msg; #diag "===> state: $state msg: $msg nativeerr: $nativeerr\n"; $testpass++; return 0; } $dbh->{odbc_err_handler} = \&err_handler; $sth = $dbh->prepare("dbcc TRACESTATUS(0)"); $sth->execute; cmp_ok($testpass, '>', 0, "dbcc messages being returned"); $testpass = 0; $dbh->{odbc_async_exec} = 0; is($dbh->{odbc_async_exec}, 0, "reset async exec"); $dbh->do(q/delete from PERL_DBD_TABLE1/); $dbh->do(q/insert into PERL_DBD_TABLE1 values(1, 1)/); $dbh->{odbc_exec_direct} = 1; is($dbh->{odbc_exec_direct}, 1, "test setting odbc_exec_direct"); $sth2 = $dbh->prepare("print 'START' select count(*) from PERL_DBD_TABLE1 print 'END'"); $sth2->execute; do { while (my @row = $sth2->fetchrow_array) { is($row[0], 1, "Valid select results with print statements"); } } while ($sth2->{odbc_more_results}); is($testpass,2, "ensure 2 error messages from two print statements"); is($lastmsg, 'END', "validate error messages being retrieved"); # need the finish if there are print statements (for now) #$sth2->finish; $dbh->{odbc_err_handler} = undef; # We need to make sure there is sufficient data returned to # overflow the TDS buffer. If all the results fit into one buffer # the tests checking for MAS not working work succeed. for (my $i = 1; $i < 1000; $i += 2) { $dbh->do('insert into PERL_DBD_TABLE1 (i, j) values (?, ?)', undef, $i, $i+1); } #$dbh->do("insert into PERL_DBD_TABLE1 (i, j) values (1, 2)"); #$dbh->do("insert into PERL_DBD_TABLE1 (i, j) values (3, 4)"); $dbh->disconnect; my $dsn = $ENV{DBI_DSN}; if ($dsn !~ /^dbi:ODBC:DSN=/ && $dsn !~ /DRIVER=/i) { my @a = split(q/:/, $ENV{DBI_DSN}); $dsn = join(q/:/, @a[0..($#a - 1)]) . ":DSN=" . 
$a[-1]; } my $base_dsn = $dsn; $dsn .= ";MARS_Connection=no"; $dbh = DBI->connect($dsn, $ENV{DBI_USER}, $ENV{DBI_PASS}, {PrintError => 0}); ok($dbh, "Connected with MARS_Connection"); diag("$DBI::errstr\n$dsn\n") if !$dbh; SKIP: { skip "could not connect with MARS_Connection attribute", 1 if !$dbh; ok(!&Multiple_concurrent_stmts($dbh, 0), "Multiple concurrent statements should fail"); $dbh->disconnect; }; $dbh = DBI->connect($dsn, $ENV{DBI_USER}, $ENV{DBI_PASS}, { odbc_cursortype => 2, PrintError => 0 }); # $dbh->{odbc_err_handler} = \&err_handler; ok(&Multiple_concurrent_stmts($dbh, 1), "Multiple concurrent statements succeed (odbc_cursortype set)"); SKIP: { skip "MS SQL Server version < 9", 1 if ($m_dbmsversion < 9); $dbh->disconnect; # throw away non-mars connection $dsn = "$base_dsn;MARS_Connection=yes;"; $dbh = DBI->connect($dsn, $ENV{DBI_USER}, $ENV{DBI_PASS}, {PrintError => 0}); my $tst = "Multiple concurrent statements succeed with MARS"; if (&Multiple_concurrent_stmts($dbh,1)) { pass($tst); } else { diag("DSN=$dsn\n"); diag("\nNOTE: You failed this test because your SQL Server driver\nis too old to handle the MARS_Connection attribute. This test cannot\neasily skip this test for old drivers as there is no definite SQL Server\ndriver version it can check.\n\n"); skip 'WARNING: driver does NOT support MARS_Connection', 1; } $dbh->disconnect; # throw away mars connection $dbh = DBI->connect; } # clean up test table and procedure # reset err handler # $dbh->{odbc_err_handler} = undef; eval {$dbh->do("DROP TABLE PERL_DBD_TABLE1");}; eval {$dbh->do("DROP PROCEDURE PERL_DBD_PROC1");}; eval { local $dbh->{PrintError} = 0; $dbh->do("drop table perl_dbd_test1"); }; $dbh->do("create table perl_dbd_test1 (i integer primary key, t varchar(30))"); $dbh->{AutoCommit} = 0; $dbh->do("insert into perl_dbd_test1 (i, t) values (1, 'initial')"); $dbh->commit; $dbh->do("update perl_dbd_test1 set t = 'second' where i = 1"); my $dbh2 = DBI->connect($ENV{DBI_DSN}, $ENV{DBI_USER}, $ENV{DBI_PASS}, {odbc_query_timeout => 2, PrintError=>0}); # $dbh2->{odbc_query_timeout} = 5; $dbh2->{AutoCommit} = 0; $dbh2->do("update perl_dbd_test1 set t = 'bad' where i = ?",undef,1); $dbh2->rollback; # should timeout and get to here. 
if so, test will pass pass("passed timeout on query using odbc_query_timeout using do with bind params"); $dbh2->do("update perl_dbd_test1 set t = 'bad' where i = 1"); $dbh2->rollback; $dbh2->disconnect; pass("passed timeout on query using odbc_query_timeout using do without bind params"); $dbh->commit; $dbh->do("drop table perl_dbd_test1"); $dbh->commit; }; $dbh->disconnect; exit 0; # get rid of use once warnings print $DBI::errstr; print $ODBCTEST::table_name; DBD-ODBC-1.61/t/rt_59621.t0000644000175000017500000000754012250310263013730 0ustar martinmartin#!/usr/bin/perl -w -I./t # $Id: rt_38977.t 13874 2010-03-24 14:22:58Z mjevans $ # # rt 59621 # # Check DBD::ODBC handles MS SQL Server XML column type properly # use Test::More; use strict; $| = 1; my $has_test_nowarnings = 1; eval "require Test::NoWarnings"; $has_test_nowarnings = undef if $@; my $tests = 11; $tests += 1 if $has_test_nowarnings; plan tests => $tests; use DBI qw(:sql_types); use_ok('ODBCTEST'); # 1 my $dbh; BEGIN { if (!defined $ENV{DBI_DSN}) { plan skip_all => "DBI_DSN is undefined"; } } END { if ($dbh) { eval { local $dbh->{PrintWarn} = 0; local $dbh->{PrintError} = 0; $dbh->do(q/drop table PERL_DBD_RT_59621/); }; } Test::NoWarnings::had_no_warnings() # 12 if ($has_test_nowarnings); } $dbh = DBI->connect(); unless($dbh) { BAIL_OUT("Unable to connect to the database $DBI::errstr\nTests skipped.\n"); exit 0; } $dbh->{RaiseError} = 1; my $dbms_name = $dbh->get_info(17); ok($dbms_name, "got DBMS name: $dbms_name"); # 2 my $dbms_version = $dbh->get_info(18); ok($dbms_version, "got DBMS version: $dbms_version"); # 3 my $driver_name = $dbh->get_info(6); ok($driver_name, "got DRIVER name: $driver_name"); # 4 my $driver_version = $dbh->get_info(7); ok($driver_version, "got DRIVER version $driver_version"); # 5 my ($ev, $sth); SKIP: { skip "not SQL Server", 6 if $dbms_name !~ /Microsoft SQL Server/; skip "Easysoft OOB", 6 if $driver_name =~ /esoobclient/; eval { local $dbh->{PrintWarn} = 0; local $dbh->{PrintError} = 0; $dbh->do('drop table PERL_DBD_RT_59621'); }; # try and create a table with an XML column # if we cannot, we'll have to assume your SQL Server is too old # and skip the rest of the tests eval { $dbh->do('create table PERL_DBD_RT_59621 (a int primary key, b xml)'); }; $ev = $@; SKIP: { skip "Failed to create test table with XML type - server too old and perhaps does not support XML column type ($ev)", 6 if $ev; pass('created test table'); # 6 eval { $sth = $dbh->prepare('INSERT into PERL_DBD_RT_59621 VALUES (?,?)'); }; $ev = $@; diag($ev) if $ev; ok(!$ev, 'prepare insert'); # 7 SKIP: { # 1 skip "Failed to prepare xml insert - $@", 4 if $ev; my $x = '' .('z' x 500) . 
''; eval { $sth->execute(1, $x); }; $ev = $@; diag($ev) if $ev; ok(!$ev, 'execute insert'); # 8 SKIP: { # 3 skip "Failed to execute insert", 3 if $ev; # now try and select the XML back # we expect a data truncation error the first time as # LongReadLen defaults to 80 eval { local $dbh->{PrintError} = 0; $sth = $dbh->selectall_arrayref( 'select * from PERL_DBD_RT_59621'); }; ok($@, 'expected select on XML type too big failed'); # 9 is($sth->state, '01004', 'data truncation error'); # 10 # now bump up LongReadLen and all should be ok # we need to make it more than 2 * expected in case it is # retrieved as WCHARs $dbh->{LongReadLen} = 2000; eval { $sth = $dbh->selectall_arrayref( 'select * from PERL_DBD_RT_59621'); }; $ev = $@; diag($ev) if $ev; ok(!$@, 'select on XML type with LongReadLen ok'); # 11 }; }; }; eval { local $dbh->{PrintWarn} = 0; local $dbh->{PrintError} = 0; $dbh->do('drop table PERL_DBD_RT_59621'); }; }; DBD-ODBC-1.61/t/sql_type_cast.t0000755000175000017500000001250712356777471015442 0ustar martinmartin#!/usr/bin/perl -w -I./t # # Test sql_type_cast via DiscardString and StrictlyTyped # NOTE: as of post 1.37 you don't need DiscardString when binding SQL_INTEGER # columns as DBD::ODBC binds them as SQL_C_LONG and uses sv_setiv. # use Test::More; use strict; use Devel::Peek; use B qw( svref_2object SVf_IOK SVf_NOK SVf_POK ); #use JSON::XS; #my $got_json_xs; #eval { # use JSON::XS #}; #$go_json_xs = 1 unless $@; $| = 1; my $has_test_nowarnings = 1; eval "require Test::NoWarnings"; $has_test_nowarnings = undef if $@; my $tests = 16; $tests += 1 if $has_test_nowarnings; plan tests => $tests; use DBI qw(:sql_types); #1 use_ok('ODBCTEST'); my $dbh; BEGIN { if (!defined $ENV{DBI_DSN}) { plan skip_all => "DBI_DSN is undefined"; } if ($DBI::VERSION < 1.611) { plan skip_all => "DBI version is too old for this test"; } } END { if ($dbh) { eval { local $dbh->{PrintWarn} = 0; local $dbh->{PrintError} = 0; $dbh->do(q/drop table PERL_DBD_drop_me/); }; $dbh->disconnect; } Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings); } sub is_iv { my $sv = svref_2object(my $ref = \$_[0]); my $flags = $sv->FLAGS; # See http://www.perlmonks.org/?node_id=971411 my $x = $sv->can('PV') ? $sv->PV : undef; if (wantarray) { return ($flags & SVf_IOK, $x); } else { return $flags & SVf_IOK; } } #sub is_json_iv { # my $x = encode_json($_[0]); # if ($x =~ /"/) { # return 0; # } else { # return 1; # } #} $dbh = DBI->connect(); $dbh->{RaiseError} = 1; unless($dbh) { BAIL_OUT("Unable to connect to the database $DBI::errstr\nTests skipped.\n"); exit 0; } $dbh->{FetchHashKeyName} = 'NAME_lc'; my ($ev, $sth); eval { local $dbh->{PrintWarn} = 0; local $dbh->{PrintError} = 0; $dbh->do('drop table PERL_DBD_drop_me'); }; eval { $dbh->do('create table PERL_DBD_drop_me (a varchar(10))'); }; $ev = $@; #2 diag($ev) if $ev; ok(!$ev, 'create test table with integer'); BAIL_OUT("Failed to create test table") if $ev; eval { $dbh->do(q/insert into PERL_DBD_drop_me (a) values('100')/); }; $ev = $@; #3 diag($ev) if $ev; ok(!$ev, 'insert into table'); BAIL_OUT("Failed to insert test data") if $ev; # try as normal just fetching without binding # we'd expect to get a string and the scalar not to have IOK $sth = $dbh->prepare(q/select a from PERL_DBD_drop_me/); $sth->execute; my ($r) = $sth->fetchrow; is($r, 100, "correct value returned"); #my $j1 = encode_json [$r]; is(is_iv($r), 0, "! 
ivok no bind") or Dump($r); # # try binding - no type specified # should be as above # $sth->bind_col(1, \$r); $sth->execute; $sth->fetch; is($r, 100, "correct value returned Bind"); #my $j2 = encode_json [$r]; is(is_iv($r), 0, "! ivok bind") or Dump($r); # # try binding specifying an integer type # expect IOK # # NB need to re-prepare as you cannot change the bind type after a # column is bound $sth = $dbh->prepare(q/select a from PERL_DBD_drop_me/); $sth->execute; $sth->bind_col(1, \$r, {TYPE => SQL_NUMERIC}); $sth->fetch; is($r, 100, "correct value returned SQL_NUMERIC") or Dump($r); #my $j2 = encode_json [$r]; my ($iv, $pv) = is_iv($r); ok($iv, "ivok bind integer") or Dump($r); ok($pv, "PV bind integer") or Dump($r); # # try binding specifying an integer type and say discard the pv # expect IOK # # NB need to re-prepare as you cannot change the bind type after a # column is bound $sth = $dbh->prepare(q/select a from PERL_DBD_drop_me/); $sth->execute; $sth->bind_col(1, \$r, {TYPE => SQL_NUMERIC, DiscardString => 1}); $sth->fetch; is($r, 100, "correct value returned SQL_NUMERIC|DiscardString"); #my $j2 = encode_json [$r]; ($iv, $pv) = is_iv($r); ok($iv, "ivok bind integer discard") or Dump($r); ok(!$pv, "not PV bind integer discard") or Dump($r); # # try binding specifying an integer type and say discard the pv # expect IOK. NOTE we use fetchall_arrayref with a slice as # DBI rebinds columns in this case - and types/attrs should be sticky. # # NB need to re-prepare as you cannot change the bind type after a # column is bound $sth = $dbh->prepare(q/select a as one from PERL_DBD_drop_me/); $sth->execute; $sth->bind_col(1, \$r, {TYPE => SQL_NUMERIC, DiscardString => 1}); my $list = $sth->fetchall_arrayref({}); is($list->[0]{one}, 100, "correct value returned SQL_NUMERIC|DiscardString"); #my $j2 = encode_json [$r]; ($iv, $pv) = is_iv($list->[0]{one}); ok($iv, "ivok bind integer discard") or Dump($list->[0]{one}); ok(!$pv, "not PV bind integer discard") or Dump($list->[0]{one}); # cannot do the following since the driver will whinge the type cannot # be cast to an integer # Invalid character value for cast specification (SQL-22018) ###### test StrictlyTyped #####eval {$dbh->do(q/delete from PERL_DBD_drop_me/)}; #####$ev = $@; #####diag($ev) if $ev; #####BAIL_OUT('Cannot delete rows from table') if $ev; ##### #####eval {$dbh->do(q/insert into PERL_DBD_drop_me (a) values('1fred')/)}; #####$ev = $@; #####diag($ev) if $ev; #####BAIL_OUT('Cannot insert secondary test rows') if $ev; ##### #####$sth = $dbh->prepare(q/select a from PERL_DBD_drop_me/); #####$sth->execute; #####$sth->bind_col(1, \$r, {TYPE => SQL_INTEGER, StrictlyTyped => 0}); #####$sth->fetch; #####($iv, $pv) = is_iv($r); #####ok(!$iv, "ivok bind integer for strict") or Dump($r); #####ok($pv, "pv null bind integer for strict") or Dump($r); DBD-ODBC-1.61/t/40UnicodeRoundTrip.t0000644000175000017500000000736012250310263016136 0ustar martinmartin#!/usr/bin/perl -w -I./t # based on *Id: 20SqlServer.t 568 2004-11-08 15:12:37Z jurl * use strict; use warnings; use UChelp; use Test::More; use DBI qw(:sql_types); my $has_test_nowarnings; $|=1; my $WAIT=0; my @data; my $tests; my $data_tests; BEGIN { if ($] < 5.008001) { plan skip_all => "Old Perl lacking unicode support"; } elsif (!defined $ENV{DBI_DSN}) { plan skip_all => "DBI_DSN is undefined"; } @data=( "hello ASCII: the quick brown fox jumps over the yellow dog", "Hello Unicode: german umlauts (\x{00C4}\x{00D6}\x{00DC}\x{00E4}\x{00F6}\x{00FC}\x{00DF}) smile (\x{263A}) hebrew shalom 
(\x{05E9}\x{05DC}\x{05D5}\x{05DD})", ); push @data,map { "again $_" } @data; utf8::is_utf8($data[0]) and die "Perl set UTF8 flag on non-unicode string constant"; utf8::is_utf8($data[1]) or die "Perl did not set UTF8 flag on unicode string constant"; utf8::is_utf8($data[2]) and die "Perl set UTF8 flag on non-unicode string constant"; utf8::is_utf8($data[3]) or die "Perl did not set UTF8 flag on unicode string constant"; unshift @data,''; push @data,42; my @plaindata=grep { !utf8::is_utf8($_) } @data; @plaindata or die "OOPS"; $data_tests = 6*@data+6*@plaindata; #diag("Data Tests : $data_tests"); $tests=1+$data_tests; eval "require Test::NoWarnings"; if (!$@) { $has_test_nowarnings = 1; } $tests += 1 if $has_test_nowarnings; #diag("Total Tests : $tests"); plan tests => $tests; } END { Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings); } my $dbh=DBI->connect(); ok(defined($dbh),"DBI connect"); SKIP: { skip "Unicode-specific tests disabled - not a unicode build", $data_tests if (!$dbh->{odbc_has_unicode}); if (DBI::neat($dbh->get_info(6)) =~ 'SQORA32') { skip "Oracle ODBC driver does not work with these tests", $data_tests; } my $dbname=$dbh->get_info(17); # DBI::SQL_DBMS_NAME SKIP: { my ($len,$fromdual,$skipempty); if ($dbname=~/Microsoft SQL Server/i) { ($len,$fromdual,$skipempty)=('LEN','',0); } elsif ($dbname=~/Oracle/i) { ($len,$fromdual,$skipempty)=('LENGTH','FROM DUAL',1); } elsif ($dbname=~/PostgreSQL/i) { ($len,$fromdual,$skipempty)=('LENGTH','',0); } elsif ($dbname=~/SQLite/i) { ($len,$fromdual,$skipempty)=('LENGTH','',0); } elsif ($dbname=~/ACCESS/i) { ($len,$fromdual,$skipempty)=('LEN','',0); } elsif ($dbname =~ /DB2/i) { ($len, $fromdual, $skipempty) = ('LENGTH', 'FROM SYSIBM.SYSDUMMY1', 0); } else { skip "Tests not supported using $dbname",$data_tests; } $dbh->{RaiseError} = 1; $dbh->{'LongTruncOk'}=1; $dbh->{'LongReadLen'}=32000; foreach my $txt (@data) { SKIP: { if ($skipempty and ($txt eq '')) { skip('Database is known to treat empty strings as NULL in this test',12); } unless (utf8::is_utf8($txt)) { my $sth=$dbh->prepare("SELECT ? as roundtrip, $len(?) as roundtriplen $fromdual"); ok(defined($sth),"prepare round-trip select statement plaintext"); # diag(dumpstr($txt)); $sth->bind_param (1,$txt,SQL_VARCHAR); $sth->bind_param (2,$txt,SQL_VARCHAR); pass("bind VARCHAR"); $sth->execute(); pass("execute"); my ($t,$tlen)=$sth->fetchrow_array(); pass('fetch'); cmp_ok($tlen,'==',length($txt),'length equal'); utf_eq_ok($t,$txt,'text equal'); } my $sth=$dbh->prepare("SELECT ? as roundtrip, $len(?) 
as roundtriplen $fromdual"); ok(defined($sth),"prepare round-trip select statement unicode"); $sth->bind_param (1,$txt,SQL_WVARCHAR); $sth->bind_param (2,$txt,SQL_WVARCHAR); pass("bind WVARCHAR"); $sth->execute(); pass("execute"); my ($t,$tlen)=$sth->fetchrow_array(); pass('fetch'); cmp_ok($tlen,'==',length($txt),'length equal'); utf_eq_ok($t,$txt,'text equal'); } } $dbh->disconnect; } }; exit 0; DBD-ODBC-1.61/t/70execute_array_native.t0000755000175000017500000000761412456455576017150 0ustar martinmartin#!/usr/bin/perl -w -I./t # loads of execute_array and execute_for_fetch tests using DBD::ODBC's native methods use Test::More; use strict; #use Data::Dumper; use Config; $| = 1; my $has_test_nowarnings = 1; eval "require Test::NoWarnings"; $has_test_nowarnings = undef if $@; my ($dbh, $ea); use DBI qw(:sql_types); use ExecuteArray; BEGIN { plan skip_all => "DBI_DSN is undefined" if (!defined $ENV{DBI_DSN}); } END { if ($dbh && $ea) { $ea->drop_table($dbh); $dbh->disconnect(); } Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings); done_testing(); } $dbh = DBI->connect(); unless($dbh) { BAIL_OUT("Unable to connect to the database $DBI::errstr\nTests skipped.\n"); exit 0; } { my $driver_name = DBI::neat($dbh->get_info(6)); my $dbms_name = $dbh->get_info(17); if (($driver_name =~ /odbcjt32.dll/i) || ($driver_name =~ /ACEODBC.DLL/i) || ($dbms_name eq 'ACCESS')) { # for OOB $has_test_nowarnings = 0; plan skip_all => 'MS Access does not support array operations'; } if ($driver_name =~ /sqlite/) { $has_test_nowarnings = 0; plan skip_all => "SQLite fails this test horribly - I can't find anywhere to report it"; } diag("\n\nNOTE: This is an experimental test. Since DBD::ODBC added the execute_for_fetch method this tests the native method and not DBI's fallback method. If you fail this test it probably means the ODBC driver you are using does not have sufficient support (or is buggy) for array operations. If you pass this test your ODBC Driver seems ok and you can get faster insert/update/delete operations using DBI's execute_array or execute_for_fetch methods by setting the odbc_array_operations to true. If this test fails it should not stop you installing DBD::ODBC but if it fails with an error other than something indicating 'connection busy' I'd strongly suggest you don't set odbc_array_operations and stick with DBI's default implementation. If this test fails for your driver I'd like to hear about it so I can compile a list of working drivers and perhaps pass bug reports on to the maintainers. Please rerun this test with TEST_VERBOSE set or using prove and send the results to the dbi-users mailing list.\n\n"); diag("\n"); diag("Perl $Config{PERL_REVISION}.$Config{PERL_VERSION}.$Config{PERL_SUBVERSION}\n"); diag("osname=$Config{osname}, osvers=$Config{osvers}, archname=$Config{archname}\n"); diag("Using DBI $DBI::VERSION\n"); diag("Using DBD::ODBC $DBD::ODBC::VERSION\n"); diag("Using DBMS_NAME " . DBI::neat($dbh->get_info(17)) . "\n"); diag("Using DBMS_VER " . DBI::neat($dbh->get_info(18)) . "\n"); diag("Using DRIVER_NAME $driver_name\n"); diag("Using DRIVER_VER " . DBI::neat($dbh->get_info(7)) . "\n"); diag("odbc_has_unicode " . $dbh->{odbc_has_unicode} . 
"\n"); } note("Using driver $dbh->{Driver}->{Name}"); $ENV{ODBC_DISABLE_ARRAY_OPERATIONS} = 0; # force array ops $ea = ExecuteArray->new($dbh, 0); # don't set odbc_disable_array_operations $dbh = $ea->dbh; $ea->drop_table($dbh); ok($ea->create_table($dbh), "create test table") or exit 1; $ea->simple($dbh, {array_context => 1, raise => 1}); $ea->simple($dbh, {array_context => 0, raise => 1}); $ea->error($dbh, {array_context => 1, raise => 1}); $ea->error($dbh, {array_context => 0, raise => 1}); $ea->error($dbh, {array_context => 1, raise => 0}); $ea->error($dbh, {array_context => 0, raise => 0}); $ea->row_wise($dbh, {array_context => 1, raise => 1}); $ea->update($dbh, {array_context => 1, raise => 1}); # do all the error ones again without specifying ArrayTupleStatus $ea->error($dbh, {array_context => 1, raise => 1, notuplestatus => 1}); $ea->error($dbh, {array_context => 0, raise => 1, notuplestatus => 1}); $ea->error($dbh, {array_context => 1, raise => 0, notuplestatus => 1}); $ea->error($dbh, {array_context => 0, raise => 0, notuplestatus => 1}); DBD-ODBC-1.61/t/pod-coverage.t0000644000175000017500000000067112250310263015106 0ustar martinmartin#!perl use Test::More; my $has_test_nowarnings = 1; eval "require Test::NoWarnings"; $has_test_nowarnings = undef if $@; my $tests = 1; $tests += 1 if $has_test_nowarnings; plan tests => $tests; END { Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings); } pass('Pod::Coverage'); eval "use Test::Pod::Coverage 1.04"; diag("Test::Pod::Coverage 1.04 required for testing POD coverage") if $@; #all_pod_coverage_ok(); DBD-ODBC-1.61/t/rt_78838.t0000755000175000017500000000353212254015307013750 0ustar martinmartin#!/usr/bin/perl -w -I./t # # rt 78838 # # DBD::ODBC was stringifying input bound parameters. # This script creates an object with an overriden stringifcation method # and tests it is stringified when bound. # use strict; use warnings; use Test::More; use DBI; use_ok('ODBCTEST'); eval "require Test::NoWarnings"; my $has_test_nowarnings = ($@ ? undef : 1); BEGIN { if (!defined $ENV{DBI_DSN}) { plan skip_all => "DBI_DSN is undefined"; } } END { Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings); done_testing(); } my $dbh = DBI->connect(); unless($dbh) { BAIL_OUT("Unable to connect to the database $DBI::errstr\nTests skipped.\n"); exit 0; } $dbh->{RaiseError} = 0; my $dbms_name = $dbh->get_info(17); ok($dbms_name, "got DBMS name: $dbms_name"); # 2 # this needs to be MS SQL Server if ($dbms_name !~ /Microsoft SQL Server/) { note('Not Microsoft SQL Server'); exit 0; } my $obj = new Object(); my $sth = $dbh->prepare(q/select ? 
AS result/); ok($sth, "statement prepared"); $sth->bind_param(1, $obj); SKIP: { skip "Failed to prepare statement", 4 if !$sth; $sth->execute(); my $fetched = $sth->fetchrow_arrayref->[0]; is($fetched, 'Object', "bound parameter correctly stringified"); # 1 bless $obj, 'Subject'; $sth->execute(); $fetched = $sth->fetchrow_arrayref->[0]; is($fetched, 'Object', "bound parameter copied and not a reference"); # 2 $sth->bind_param(1, 'fred'); $sth->execute(); $fetched = $sth->fetchrow_arrayref->[0]; is($fetched, 'fred', "rebound parameter correctly retrieved"); # 3 eval { $sth->bind_param(1, $obj); }; ok($@, "cannot bind a plain reference"); # 4 $sth = undef; } $dbh->disconnect; package Object; use overload '""' => 'to_s'; sub new() { bless { }, shift }; sub to_s() { my $self = shift; ref($self); } DBD-ODBC-1.61/t/82_table_info.t0000755000175000017500000000750412456453112015164 0ustar martinmartin#!/usr/bin/perl -w -I./t # # Test type_info # use strict; use warnings; use DBI; use Test::More; use Data::Dumper; use DBI::Const::GetInfoType; my $has_test_nowarnings = 1; eval "require Test::NoWarnings"; $has_test_nowarnings = undef if $@; my $dbh; BEGIN { if (!defined $ENV{DBI_DSN}) { plan skip_all => "DBI_DSN is undefined"; } } END { if ($dbh) { local $dbh->{PrintWarn} = 0; local $dbh->{PrintError} = 0; $dbh->do(q/drop table PERL_DBD_DROP_ME/); } }; $dbh = DBI->connect(); unless($dbh) { BAIL_OUT("Unable to connect to the database ($DBI::errstr)\nTests skipped.\n"); exit 0; } $dbh->{RaiseError} = 1; $dbh->{PrintError} = 0; $dbh->do(q/create table PERL_DBD_DROP_ME (a char(10))/); if ($dbh->get_info($GetInfoType{SQL_CATALOG_NAME}) ne 'N') { # test type_info('%','','') which should return catalogs only my $s = $dbh->table_info('%', '', ''); my $r = $s->fetchall_arrayref; if ($r && scalar(@$r)) { # assuming we get something back my $pass = 1; foreach my $row (@$r) { if (!defined($row->[0])) { $pass = 0; diag("Catalog is not defined"); } if (defined($row->[1])) { $pass = 0; diag("Schema is defined as $row->[1]"); } if (defined($row->[2])) { $pass = 0; diag("Table is defined as $row->[2]"); } } ok($pass, "catalogs only") or diag(Dumper($r)); } } if ($dbh->get_info($GetInfoType{SQL_SCHEMA_USAGE}) != 0) { # test type_info('','%','') which should return schema only my $s = $dbh->table_info('', '%', ''); my $r = $s->fetchall_arrayref; if ($r && scalar(@$r)) { # assuming we get something back my $pass = 1; foreach my $row (@$r) { if (defined($row->[0])) { $pass = 0; diag("Catalog is defined as $row->[0]"); } if (!defined($row->[1])) { $pass = 0; diag("Schema is not defined"); } if (defined($row->[2])) { $pass = 0; diag("Table is defined as $row->[2]"); } } ok($pass, "schema only") or diag(Dumper($r)); } } { # test type_info() - returns tables my $s = $dbh->table_info(undef, undef, 'PERL_DBD_DROP_ME'); my $r = $s->fetchall_arrayref; ok(scalar(@$r) > 0, 'table found'); if ($r && scalar(@$r)) { # assuming we get something back my $pass = 0; foreach my $row (@$r) { $pass = 1; } ok($pass, "table only") or diag(Dumper($r)); } } # test type_info('','','', '%') which should return table types only SKIP: { skip "SQLite is known to fail the next test because catalog, schema and table are returned as '' instead of undef", 1 if ($dbh->get_info($GetInfoType{SQL_DRIVER_NAME}) =~ /sqlite/); my $s = $dbh->table_info('', '', '', '%'); my $r = $s->fetchall_arrayref; if ($r && scalar(@$r)) { # assuming we get something back my $pass = 1; foreach my $row (@$r) { if (defined($row->[0])) { $pass = 0; diag("Catalog is defined as 
$row->[0]"); } if (defined($row->[1])) { $pass = 0; diag("Schema is defined as $row->[1]"); } if (defined($row->[2])) { $pass = 0; diag("Table is defined as $row->[2]"); } if (!defined($row->[3])) { $pass = 0; diag("table type is not defined"); } } ok($pass, "table type only") or diag(Dumper($r)); } }; Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings); done_testing(); DBD-ODBC-1.61/t/rt_62033.t0000755000175000017500000000746712305117067013742 0ustar martinmartin#!/usr/bin/perl -w -I./t # # rt62033 - not really this rt but a bug discovered when looking in to it # # Check active is enabled on a statement after SQLMoreResults indicates # there is another result-set. # use Test::More; use strict; eval "require Test::NoWarnings"; my $has_test_nowarnings = ($@ ? undef : 1); use DBI qw(:sql_types); use_ok('ODBCTEST'); my $dbh; BEGIN { if (!defined $ENV{DBI_DSN}) { plan skip_all => "DBI_DSN is undefined"; } } END { if ($dbh) { eval { local $dbh->{PrintWarn} = 0; local $dbh->{PrintError} = 0; $dbh->do(q/drop table PERL_DBD_RT_62033/); }; } Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings); done_testing(); } $dbh = DBI->connect(); unless($dbh) { BAIL_OUT("Unable to connect to the database $DBI::errstr\nTests skipped.\n"); exit 0; } $dbh->{RaiseError} = 1; $dbh->{ChopBlanks} = 1; $dbh->{PrintError} = 0; my $dbms_name = $dbh->get_info(17); ok($dbms_name, "got DBMS name: $dbms_name"); # 2 my $dbms_version = $dbh->get_info(18); ok($dbms_version, "got DBMS version: $dbms_version"); # 3 my $driver_name = $dbh->get_info(6); ok($driver_name, "got DRIVER name: $driver_name"); # 4 my $driver_version = $dbh->get_info(7); ok($driver_version, "got DRIVER version $driver_version"); # 5 my ($ev, $sth); # this needs to be MS SQL Server if ($dbms_name !~ /Microsoft SQL Server/) { note('Not Microsoft SQL Server'); exit 0; } eval { local $dbh->{PrintWarn} = 0; local $dbh->{PrintError} = 0; $dbh->do('drop table PERL_DBD_RT_62033'); }; # try and create a table to test with eval { $dbh->do( 'create table PERL_DBD_RT_62033 (a int identity, b char(10) not null)'); }; $ev = $@; if ($@) { BAIL_OUT("Failed to create test table - aborting test ($ev)"); exit 0; } pass('created test table'); sub doit { my $dbh = shift; my $expect = shift; # undef if we expect this to fail my $s = $dbh->prepare_cached( q/insert into PERL_DBD_RT_62033 (b) values(?);select @@identity/); eval {$s->execute(@_)}; if (!$expect) { # expected to fail ok($@, 'Error for constraint - expected'); note("For some drivers (freeTDS/MS SQL Server for Linux) there is no way out of this so expect further errors"); } else { ok(!$@, 'Execute ok') or diag($@); } # Some drivers won't like us calling SQLMoreResults/SQLDescribe etc # after the above if it errors. When we call odbc_more_results it actually # ends up doing a SQLDescribe. For most drivers I've tested they # are ok with this but a few (freeTDS) are not. The problem with freeTDS # is that if you then omit the SQLMoreResults and continue with this test # you'll get an SQL_ERROR from the next execute without an error msg # so it would seem there is no way to make this work in freeTDS as it # stands. # # Some drivers (basically all those I've tested except freeTDS) need you # to call SQLMoreResults even if the above fails or you'll get invalid # cursor state on the next statement (MS SQL Server and MS native client # driver). 
if ($s->{NUM_OF_FIELDS} == 0) { my $x = $s->{odbc_more_results}; } if ($expect) { # for the error case where we attempt to insert a NULL into column b # we'd expect odbc_more_results to return 0/false - there are no more # results my $identity; ($identity) = $s->fetchrow_array; #diag("identity = ", DBI::neat($identity), "\n"); is($identity, $expect, "Identity"); ($identity) = $s->fetchrow_array; } else { $s->finish; } } doit($dbh, undef, undef); doit($dbh, 2, 'fred'); eval { local $dbh->{PrintWarn} = 0; local $dbh->{PrintError} = 0; $dbh->do('drop table PERL_DBD_RT_62033'); }; DBD-ODBC-1.61/t/30Oracle.t0000755000175000017500000000404512254015045014111 0ustar martinmartin#!/usr/bin/perl -I./t -w use Test::More; $| = 1; my $has_test_nowarnings = 1; eval "require Test::NoWarnings"; $has_test_nowarnings = undef if $@; my $tests = 4; $tests += 1 if $has_test_nowarnings; plan tests => $tests; # use_ok('DBI', qw(:sql_types)); # can't seem to get the imports right this way use DBI qw(:sql_types); use_ok('ODBCTEST'); #use_ok('Data::Dumper'); # to help ActiveState's build process along by behaving (somewhat) if a dsn is not provided BEGIN { plan skip_all => "DBI_DSN is undefined" if (!defined $ENV{DBI_DSN}); } END { Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings); } my $dbh = DBI->connect(); unless($dbh) { BAIL_OUT("Unable to connect to the database $DBI::errstr\nTests skipped.\n"); exit 0; } my $dbname = $dbh->get_info(17); # DBI::SQL_DBMS_NAME SKIP: { skip "Oracle tests not supported using " . $dbname, 3 unless ($dbname =~ /Oracle/i); $dbh->do("create or replace function PERL_DBD_TESTFUNC(a in integer, b in integer) return integer is c integer; begin if b is null then c := 0; else c := b; end if; return a * c + 1; end;"); my $sth = $dbh->prepare("{ ? = call PERL_DBD_TESTFUNC(?, ?) }"); my $value = undef; my $b = 30; $sth->bind_param_inout(1, \$value, 50, SQL_INTEGER); $sth->bind_param(2, 10, SQL_INTEGER); $sth->bind_param(3, 30, SQL_INTEGER); $sth->execute; is($value, 301); $b = undef; $sth->bind_param_inout(1, \$value, 50, SQL_INTEGER); $sth->bind_param(2, 20, SQL_INTEGER); $sth->bind_param(3, undef, SQL_INTEGER); $sth->execute; is($value,1); eval{$dbh->do("drop function PERL_DBD_TESTFUNC");}; $dbh->do("create or replace procedure PERL_DBD_TESTPROC(a in integer,b out integer) is begin b := a + 1; end;"); $sth = $dbh->prepare("{call PERL_DBD_TESTPROC(?,?)}"); $sth->bind_param(1, 10, SQL_INTEGER); $sth->bind_param_inout(2, \$value, 50, SQL_INTEGER); $sth->execute; is($value, 11); eval{$dbh->do("drop procedure PERL_DBD_TESTPROC");}; }; if (DBI->trace > 0) { DBI->trace(0); } $dbh->disconnect; DBD-ODBC-1.61/t/rt_79397.t0000755000175000017500000000374312254015316013755 0ustar martinmartin#!/usr/bin/perl -w -I./t # # rt 79397 # # Code that came from perl monks in node http://perlmonks.org/?node_id=989136 # If you bind an output parameter as undef initially then change it # the changed value may not get to the database. # use Test::More; use strict; use DBI; use_ok('ODBCTEST'); eval "require Test::NoWarnings"; my $has_test_nowarnings = ($@ ? 
undef : 1); my $dbh; BEGIN { if (!defined $ENV{DBI_DSN}) { plan skip_all => "DBI_DSN is undefined"; } } END { Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings); done_testing(); } $dbh = DBI->connect(); unless($dbh) { BAIL_OUT("Unable to connect to the database $DBI::errstr\nTests skipped.\n"); exit 0; } $dbh->{RaiseError} = 0; my $dbms_name = $dbh->get_info(17); ok($dbms_name, "got DBMS name: $dbms_name"); # 2 my $dbms_version = $dbh->get_info(18); ok($dbms_version, "got DBMS version: $dbms_version"); # 3 my $driver_name = $dbh->get_info(6); ok($driver_name, "got DRIVER name: $driver_name"); # 4 my $driver_version = $dbh->get_info(7); ok($driver_version, "got DRIVER version $driver_version"); # 5 # this needs to be MS SQL Server if ($dbms_name !~ /Microsoft SQL Server/) { note('Not Microsoft SQL Server'); exit 0; } my $sth = $dbh->prepare(q/SELECT :foo, :bar/); ok($sth, "statement prepared"); SKIP: { skip "Failed to prepare statement", 6 if !$sth; my @cols = qw(foo bar); my %hsh; for (@cols) { # 2 ok($sth->bind_param_inout( "$_" => \$hsh{$_}, 0 ), "$_ bound"); } $hsh{foo} = 'abc'; $hsh{bar} = 123; my $r; ok($r = $sth->execute(), "execute first time"); # 3 SKIP: { skip "Failed to execute", 4 if !$r; my @arr = $sth->fetchrow_array; is($arr[0], 'abc', "p1 ok"); # 4 is($arr[1], '123', "p2 ok"); # 5 $hsh{bar} = 456; $sth->execute(); @arr = $sth->fetchrow_array; is($arr[0], 'abc', "p1 ok"); # 6 is($arr[1], '456', "p2 ok"); # 7 }; }; DBD-ODBC-1.61/t/01base.t0000755000175000017500000000135012254014765013620 0ustar martinmartin#!perl -w use Test::More; use strict; my $has_test_nowarnings = 1; eval "require Test::NoWarnings"; $has_test_nowarnings = undef if $@; my $tests = 6; $tests += 1 if $has_test_nowarnings; plan tests => $tests; require DBI; require_ok('DBI'); import DBI; pass("import DBI"); my $switch = DBI->internal; is(ref $switch, 'DBI::dr', "DBI->internal is DBI::dr"); my $drh; eval { $drh = DBI->install_driver('ODBC'); }; my $ev = $@; diag($ev) if ($ev); ok(!$ev, 'install ODBC'); SKIP: { skip "driver could not be loaded", 2 if $ev; is(ref $drh, 'DBI::dr', "Install ODBC driver OK"); ok($drh->{Version}, "Version is not empty"); } END { Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings); } exit 0; DBD-ODBC-1.61/t/rt_50852.t0000755000175000017500000000376512254015261013741 0ustar martinmartin#!/usr/bin/perl -w -I./t use Test::More; use strict; $| = 1; my $has_test_nowarnings = 1; eval "require Test::NoWarnings"; $has_test_nowarnings = undef if $@; my $tests = 5; $tests += 1 if $has_test_nowarnings; plan tests => $tests; use DBI qw(:sql_types); use_ok('ODBCTEST'); # 1 my $dbh; BEGIN { plan skip_all => "DBI_DSN is undefined" if (!defined $ENV{DBI_DSN}); } END { if ($dbh) { eval { local $dbh->{PrintWarn} = 0; local $dbh->{PrintError} = 0; $dbh->do(q/drop table PERL_DBD_RT_50852/); }; $dbh->disconnect; } Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings); # 6 } $dbh = DBI->connect(); unless($dbh) { BAIL_OUT("Unable to connect to the database $DBI::errstr\nTests skipped.\n"); exit 0; } my $sth; $dbh->{RaiseError} = 0; # # The odbc_force_bind_type should cover up the fact that most MS SQL Server # ODBC drivers cannot successfully describe the parameter in the following # SQL. 
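# (Sketch note, not part of the original test: odbc_force_bind_type makes
# DBD::ODBC bind every placeholder as the given SQL type instead of asking
# the driver via SQLDescribeParam. The narrower, per-placeholder alternative
# is to pass an explicit type with the bind, e.g. something like
#     $sth->bind_param(1, 'fred', SQL_VARCHAR);
# which only overrides that one parameter.)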
# $dbh->{odbc_force_bind_type} = SQL_VARCHAR; my $dbname = $dbh->get_info(17); # DBI::SQL_DBMS_NAME SKIP: { skip "Microsoft SQL Server test not supported using $dbname", 4 unless ($dbname =~ /Microsoft SQL Server/i); eval { local $dbh->{PrintWarn} = 0; local $dbh->{PrintError} = 0; $dbh->do(q/drop table PERL_DBD_RT_50852/); }; pass('dropped test table'); # 2 eval { $dbh->do(q{CREATE TABLE PERL_DBD_RT_50852 (name nvarchar(255))}); $dbh->do(q{insert into PERL_DBD_RT_50852 values('frederick')}); }; my $ev = $@; ok(!$ev, 'set up test table'); # 3 SKIP: { skip 'Failed to setup test table', 2 if $ev; $sth = $dbh->prepare( q/select name from PERL_DBD_RT_50852 where charindex(?, name) = 1/); ok($sth, 'prepared sql'); #4 SKIP: { skip 'Failed to prepare SQL', 1 unless $sth; ok($sth->execute('fred'), 'execute sql') && $sth->finish; #5 }; }; }; exit 0; DBD-ODBC-1.61/t/08bind2.t0000755000175000017500000000663512456460664013735 0ustar martinmartin#!/usr/bin/perl -w -I./t use Test::More; use strict; $| = 1; my $has_test_nowarnings = 1; eval "require Test::NoWarnings"; $has_test_nowarnings = undef if $@; my $tests = 6; $tests += 1 if $has_test_nowarnings; plan tests => $tests; # use_ok('DBI', qw(:sql_types)); # can't seem to get the imports right this way use DBI qw(:sql_types); use_ok('ODBCTEST'); # 1 use_ok('Data::Dumper'); # 2 my $dbh; BEGIN { if (!defined $ENV{DBI_DSN}) { plan skip_all => "DBI_DSN is undefined"; } } END { if ($dbh) { ODBCTEST::tab_delete($dbh); } Test::NoWarnings::had_no_warnings() if ($has_test_nowarnings); } $dbh = DBI->connect(); unless($dbh) { BAIL_OUT("Unable to connect to the database $DBI::errstr\nTests skipped.\n"); exit 0; } ok(ODBCTEST::tab_create($dbh), "Create tables"); SKIP: { #skip "SQLDescribeParam not supported using " . $dbh->get_info(6) . "\n", 3, unless $dbh->func(58, 'GetFunctions'); $dbh->{RaiseError} = 0; $dbh->{PrintError} = 0; $dbh->{LongReadLen} = 10000; my $longstr = "This is a test of a string that is longer than 80 characters. It will be checked for truncation and compared with itself."; my $longstr2 = $longstr . " " . $longstr . " " . $longstr . " " . $longstr; my $longstr3 = $longstr2 . " " . $longstr2; my @data_no_dates = ( [ 1, 'foo', 'test1', undef, undef ], [ 2, 'bar', 'test1', undef, undef ], [ 3, 'bletch', 'test1', undef, undef], ); my @data_no_dates_with_long = ( [ 4, 'foo2', $longstr, undef, undef ], [ 5, 'bar2', $longstr2, undef, undef ], [ 6, 'bletch2', $longstr3, undef, undef], ); my @data_with_dates = ( [ 7, 'foo22', 'test3', "1998-05-13", "1998-05-13 00:01:00"], [ 8, 'bar22', 'test3', "1998-05-14", "1998-05-14 00:01:00"], [ 9, 'bletch22', 'test3', "1998-05-15", "1998-05-15 00:01:00"], [ 10, 'bletch22', 'test3', "1998-05-15", "1998-05-15 00:01:00.250"], [ 11, 'bletch22', 'test3', "1998-05-15", "1998-05-15 00:01:00.390"], [ 12, 'bletch22', 'test3', undef, undef], ); my $dbname = $dbh->get_info(17); # DBI::SQL_DBMS_NAME # turn off default binding of varchar to test this! $dbh->{odbc_default_bind_type} = 0; my $rc = ODBCTEST::tab_insert_bind($dbh, \@data_no_dates, 0); unless ($rc) { diag("These are tests which rely upon the driver to tell what the parameter type is for the column. 
This means you need to ensure you tell your driver the type of the column in bind_col().\n"); } ok($rc, "insert #1 various test data no dates, no long data"); # 3 $dbh->{PrintError} = 0; $rc = ODBCTEST::tab_insert_bind($dbh, \@data_no_dates_with_long, 0); ok($rc, "insert #2 various test data no dates, with long data"); # 4 $rc = ODBCTEST::tab_insert_bind($dbh, \@data_with_dates, 0); # warn "\nThis test is known to fail using Oracle's ODBC drivers for # versions 8.x and 9.0 -- please ignore the failure or, better yet, bug Oracle :)\n\n"; SKIP: { skip "Known to fail using Oracle's ODBC drivers 8.x and 9.x", 1 if (!$rc && $dbname =~ /Oracle/i); # Driver without SQLDescribeParam rarely support binding # dates/times without a type skip "SQLDescribeParam not supported using " . $dbh->get_info(17) . "\n", 1, unless $dbh->func(58, 'GetFunctions'); ok($rc, "insert #3 various test data data with dates"); # 5 }; ODBCTEST::tab_delete($dbh); }; exit(0); DBD-ODBC-1.61/README.unicode0000644000175000017500000000304012254016164014410 0ustar martinmartin# # Author Martin J. Evans (mjevans at cpan dot org) # This document is historical now. If all you want to know is what Unicode is supported by DBD::ODBC read the DBD::ODBC pod. The original Unicode support was written by Alexander Foken and posted as a patch. You can find Alexander's original README document in README.af (some of this is directly taken from that document). Around DBD::ODBC 1.14 Alexander's original patch was adapted to include optional Unicode support for UNIX and introduced into DBD::ODBC. In DBD::ODBC 1.17 the Unicode support in DBD::ODBC on both Windows and Unix was enhanced considerably to support unicode SQL strings, column names and connection strings (although see caveat below). Most of the unicode documentation that was here has now moved to the main DBD::ODBC pod. Attributions ============ A substantial part of the code to translate between UTF8 and UTF16 came from the unicode web site (Unicode Inc) and was authored by Mark E. Davis and updated by various (see ConvertUTF.c). This code was modified in minor ways to allow ConvertUTF16toUTF8 and ConvertUTF8toUTF16 to accept NULL target pointers and return the number of bytes required in the target string for the conversion. A substantial part (most in fact) of the original Unicode support in DBD::ODBC for wide bound columns and parameters was written by Alexander Foken and simply changed to support UNIX as well as Windows by me. Patches/Problems ================ Please see the main README. The best place to discuss DBD::ODBC is on the dbi-users mailing list. DBD-ODBC-1.61/ConvertUTF.h0000644000175000017500000001430712250310263014254 0ustar martinmartin#ifdef WITH_UNICODE /* * Copyright 2001-2004 Unicode, Inc. * * Disclaimer * * This source code is provided as is by Unicode, Inc. No claims are * made as to fitness for any particular purpose. No warranties of any * kind are expressed or implied. The recipient agrees to determine * applicability of information provided. If this file has been * purchased on magnetic or optical media from Unicode, Inc., the * sole remedy for any claim will be exchange of defective media * within 90 days of receipt. * * Limitations on Rights to Redistribute This Code * * Unicode, Inc. hereby grants the right to freely use the information * supplied in this file in the creation of products supporting the * Unicode Standard, and to make copies of this file in any form * for internal or external distribution as long as this notice * remains attached. 
*/ /* * NOTE: The original version of this code can be found at * http://www.unicode.org/Public/PROGRAMS/CVTUTF/ * This version was slightly modified to allow ConvertUTF8toUTF16 and * ConvertUTF16toUTF8 to calculate the bytes required without writing to * the target buffer. * */ /* --------------------------------------------------------------------- Conversions between UTF32, UTF-16, and UTF-8. Header file. Several funtions are included here, forming a complete set of conversions between the three formats. UTF-7 is not included here, but is handled in a separate source file. Each of these routines takes pointers to input buffers and output buffers. The input buffers are const. Each routine converts the text between *sourceStart and sourceEnd, putting the result into the buffer between *targetStart and targetEnd. Note: the end pointers are *after* the last item: e.g. *(sourceEnd - 1) is the last item. The return result indicates whether the conversion was successful, and if not, whether the problem was in the source or target buffers. (Only the first encountered problem is indicated.) After the conversion, *sourceStart and *targetStart are both updated to point to the end of last text successfully converted in the respective buffers. Input parameters: sourceStart - pointer to a pointer to the source buffer. The contents of this are modified on return so that it points at the next thing to be converted. targetStart - similarly, pointer to pointer to the target buffer. sourceEnd, targetEnd - respectively pointers to the ends of the two buffers, for overflow checking only. These conversion functions take a ConversionFlags argument. When this flag is set to strict, both irregular sequences and isolated surrogates will cause an error. When the flag is set to lenient, both irregular sequences and isolated surrogates are converted. Whether the flag is strict or lenient, all illegal sequences will cause an error return. This includes sequences such as: , , or in UTF-8, and values above 0x10FFFF in UTF-32. Conformant code must check for illegal sequences. When the flag is set to lenient, characters over 0x10FFFF are converted to the replacement character; otherwise (when the flag is set to strict) they constitute an error. Output parameters: The value "sourceIllegal" is returned from some routines if the input sequence is malformed. When "sourceIllegal" is returned, the source value will point to the illegal value that caused the problem. E.g., in UTF-8 when a sequence is malformed, it points to the start of the malformed sequence. Author: Mark E. Davis, 1994. Rev History: Rick McGowan, fixes & updates May 2001. Fixes & updates, Sept 2001. ------------------------------------------------------------------------ */ /* --------------------------------------------------------------------- The following 4 definitions are compiler-specific. The C standard does not guarantee that wchar_t has at least 16 bits, so wchar_t is no less portable than unsigned short! All should be unsigned values to avoid sign extension during bit mask & shift operations. 
------------------------------------------------------------------------ */ typedef unsigned long UTF32; /* at least 32 bits */ typedef unsigned short UTF16; /* at least 16 bits */ typedef unsigned char UTF8; /* typically 8 bits */ typedef unsigned char Boolean; /* 0 or 1 */ /* Some fundamental constants */ #define UNI_REPLACEMENT_CHAR (UTF32)0x0000FFFD #define UNI_MAX_BMP (UTF32)0x0000FFFF #define UNI_MAX_UTF16 (UTF32)0x0010FFFF #define UNI_MAX_UTF32 (UTF32)0x7FFFFFFF #define UNI_MAX_LEGAL_UTF32 (UTF32)0x0010FFFF typedef enum { conversionOK, /* conversion successful */ sourceExhausted, /* partial character in source, but hit end */ targetExhausted, /* insuff. room in target for conversion */ sourceIllegal /* source sequence is illegal/malformed */ } ConversionResult; typedef enum { strictConversion = 0, lenientConversion } ConversionFlags; /* This is for C++ and does no harm in C */ #ifdef __cplusplus extern "C" { #endif ConversionResult ConvertUTF8toUTF16 ( const UTF8** sourceStart, const UTF8* sourceEnd, UTF16** targetStart, UTF16* targetEnd, ConversionFlags flags, unsigned int *bytes); ConversionResult ConvertUTF16toUTF8 ( const UTF16** sourceStart, const UTF16* sourceEnd, UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags, unsigned int *bytes_converted); ConversionResult ConvertUTF8toUTF32 ( const UTF8** sourceStart, const UTF8* sourceEnd, UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags); ConversionResult ConvertUTF32toUTF8 ( const UTF32** sourceStart, const UTF32* sourceEnd, UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags); ConversionResult ConvertUTF16toUTF32 ( const UTF16** sourceStart, const UTF16* sourceEnd, UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags); ConversionResult ConvertUTF32toUTF16 ( const UTF32** sourceStart, const UTF32* sourceEnd, UTF16** targetStart, UTF16* targetEnd, ConversionFlags flags); Boolean isLegalUTF8Sequence(const UTF8 *source, const UTF8 *sourceEnd); #ifdef __cplusplus } #endif /* --------------------------------------------------------------------- */ #endif /* WITH_UNICODE */ DBD-ODBC-1.61/fixup_t.h0000644000175000017500000000036312250310263013730 0ustar martinmartin /* fix up types for driver managers that don't have them */ /* (basically iODBC) */ #ifndef SQLCHAR #define SQLCHAR char #endif #ifndef SQLSMALLINT #define SQLSMALLINT I16 #endif #ifndef SQLUINTEGER #define SQLUINTEGER U32 #endif DBD-ODBC-1.61/FAQ0000644000175000017500000014731612654415413012460 0ustar martinmartinuse strict; =encoding utf8 =head1 NAME DBD::ODBC::FAQ - Frequently Asked Questions for DBD::ODBC =head1 SYNOPSIS perldoc DBD::ODBC::FAQ =head1 VERSION ($Revision$) =head1 QUESTIONS =head2 How do I read more than N characters from a Memo | BLOB | LONG field? See LongReadLen in the DBI docs. Example: $dbh->{LongReadLen} = 20000; $sth = $dbh->prepare("select long_col from big_table"); $sth->execute; etc =head2 What is DBD::ODBC? =head2 Why can't I connect? =head2 Do I need an ODBC driver? =head2 What is the ODBC driver manager? These, general questions lead to needing definitions. =over 4 =item ODBC Driver The ODBC Driver is the driver that the ODBC manager uses to connect and interact with the RDBMS. You B need this to connect to any database. For Win32, they are plentiful and installed with many applications. For Linux/Unix, you can find a fairly comprehensive list at L. =item ODBC Driver Manager The ODBC driver manager is the interface between an ODBC application (DBD::ODBC in this case) and the ODBC driver. 
The driver manager principally provides the ODBC API so ODBC applications may link with a single shared object (or dll) and be able to talk to a range of ODBC drivers. At run time the application provides a connection string which defines the ODBC data source it wants to connect to and this in turn defines the ODBC driver which will handle this data source. The driver manager loads the requested ODBC driver and passes all ODBC API calls on to the driver. In this way, an ODBC application can be built and distributed without knowing which ODBC driver it will be using. However, this is a rather simplistic description of what the driver manager does. The ODBC driver manager also: * Controls a repository of installed ODBC drivers (on UNIX this is the file odbcinst.ini). * Controls a repository of defined ODBC data sources (on UNIX these are the files odbc.ini and .odbc.ini). * Provides the ODBC driver APIs (SQLGetPrivateProfileString and SQLWritePrivateProfileString) to read and write ODBC data source attributes. * Handles ConfigDSN which the driver exports to configure data sources. * Provides APIs to install and uninstall drivers (SQLInstallDriver). * Maps ODBC versions e.g. so an ODBC 2.0 application can work with an ODBC 3.0 driver and vice versa. * Maps ODBC states between different versions of ODBC. * Provides a cursor library for drivers which only support forward-only cursors. * Provides SQLDataSources and SQLDrivers so an application can find out what ODBC drivers are installed and what ODBC data sources are defined. * Provides an ODBC administrator which driver writers can use to install ODBC drivers and users can use to define ODBC data sources. The ODBC Driver Manager is the piece of software which interacts with the drivers for the application. It "hides" some of the differences between the drivers (i.e. if a function call is not supported by a driver, it 'hides' that and informs the application that the call is not supported. DBD::ODBC needs this to talk to drivers. Under Win32, you usually get the ODBC Driver Manager as part of the OS. Under Unix/Linux you may have to find and build the driver manager yourself. The two main driver managers for Unix are unixODBC (L) and iODBC (L). B For a reasonable description of ODBC on Unix/Linux see L =item DBD::ODBC DBD::ODBC uses the driver manager to talk to the ODBC driver(s) on your system. You need both a driver manager and driver installed and tested before working with DBD::ODBC. You need to have a DSN (see below) configured and B before being able to test DBD::ODBC. =item DSN (Data Source Name) The DSN is a way of referring to a particular driver and database by any name you wish. The DSN is usually a key to a list of attributes the ODBC driver needs to connect to the database (e.g. ip address and port) but there is always a key which names the driver so the driver manager knows which driver to use with which data source. Do no confuse DSNs with ODBC connection strings or DBI's "$data_source" (the first argument to L. The $data_source argument to DBI is composed of 'dbi:DRIVER:something_else' where DRIVER is the name of the DBD driver you want to use (ODBC of course for DBD::ODBC). The "something_else" for DBD::ODBC can be a DSN name or it can be a normal ODBC connection string. An ODBC connection string consists of attribute/value pairs separated with semicolons (;). 
You can replace "something_else" above with a normal ODBC connection string but as a special case for DBD::ODBC you can just use the DSN name without the usual ODBC connection string prefix of "DSN=dsn_name". e.g. =over =item dbi:ODBC:DSN=fred ODBC connection string using fred DSN =item dbi:ODBC:fred Same as above (a special case). =item dbi:ODBC:Driver={blah blah driver};Host=1.2.3.4;Port=1000; This is known as a DSN-less connection string for obvious reasons. =back =back =head2 Where do I get an ODBC driver manager for Unix/Linux? DBD::ODBC used to come bundled with a driver manager but this became inconvenient when the driver manager was updated. The two main ODBC Driver Managers for Unix are unixODBC (L) and iODBC (L). If you are running a packaged Linux like RedHat, Ubuntu, Fedora, Suse etc etc you'll usually find it packaged with unixODBC and using the package manager to install it is fairly straight forward. However, make sure that if the driver manager is split into multiple packages you install the development package as well as that contains the C header files required by DBD::ODBC. If you cannot find an ODBC Driver Manager package for your OS you can download the source tar files for either of the driver managers above and build it yourself. =head2 How do I access a MS SQL Server database from Linux/UNIX? You have loads of choices (in no particular order): * using DBI::ProxyServer or DBD::Gofer. You'll need the former if you use transactions. * using a commercial ODBC Driver or bridge like the ones from Easysoft or Openlink. * using FreeTDS an open source TDS library which includes an ODBC Driver. * using DBD::Sybase and Sybase libraries. =head2 How do I access a MS-Access database from Linux? There are basically three choices: * a commercial MS Access ODBC Driver like the one from Easysoft. * a commercial ODBC Bridge like the ones from Easysoft or OpenLink. * using mdbtools although as of writing it has not been updated since June 2004, only provides read access and seems to be a little buggy. =head2 Almost all of my tests for DBD::ODBC fail. They complain about not being able to connect or the DSN is not found. Please, please test your configuration of ODBC and driver before trying to test DBD::ODBC. Most of the time, this stems from the fact that the DSN (or ODBC) is not configured properly. unixODBC comes with a small program isql and iODBC comes with odbctest and you should use these to test your ODBC configuration is working properly first. =head2 I'm attempting to bind a Long Var char (or other specific type) and the binding is not working. The code I'm using is below: $sth->bind_param(1, $str, $DBI::SQL_LONGVARCHAR); ^^^ The problem is that DBI::SQL_LONGVARCHAR is not the same as $DBI::SQL_LONGVARCHAR and that $DBI::SQL_LONGVARCHAR is an error! It should be: $sth->bind_param(1, $str, DBI::SQL_LONGVARCHAR); =head2 Does DBD::ODBC support Multiple Active Statements? Multiple Active Statements (MAS) are concurrent statements created from the same database handle which both have pending actions on them (e.g. they both have executed a select statement but not retrieved all the available rows yet). DBD::ODBC does support MAS but whether you can actually use MAS is down to the ODBC Driver. By default MS SQL Server did not used to support multiple active statements if any of them were select statements. You could get around this (with caution) by changing to a dynamic cursor. 
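As a rough sketch (assuming your DBD::ODBC is recent enough to have the odbc_cursortype connect attribute; 2 is the ODBC value for SQL_CURSOR_DYNAMIC and "mydsn" is just a placeholder DSN), switching to a dynamic cursor looks like this:

  # Sketch only: ask the SQL Server ODBC driver for dynamic cursors so more
  # than one select can be active on this connection. Expect slower fetches.
  my $h = DBI->connect('dbi:ODBC:DSN=mydsn', 'dbuser', 'dbpass',
                       { odbc_cursortype => 2 });   # 2 == SQL_CURSOR_DYNAMIC
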
There is a "hack" in DBD::ODBC which can be used to enable MAS but you have to fully understand the implications of doing so(see L and L). In MS SQL Server 2005, there is a new thing called MARS (Multiple Active Result Sets) which allows multiple active select statements but it has some nasty implications if you are also doing transactions. To enable MARS from DBD::ODBC add "MARS_Connection=Yes" to the connection string as in: $h->DBI->connect('dbi:ODBC:DSN=mydsn;MARS_Connection=Yes;'); NOTE: Even though you may be using MS SQL Server 2005 if you are using MS SQL Server drivers you will need to use the Native Client Driver or a later MS SQL Server ODBC driver (2008 or later) to use MARS. For other drivers it depends. I believe various Oracle ODBC drivers do support multiple active statements as myodbc does. Think carefully before using multiple active statements. It is probably not portable and there is nearly always a better way of doing it. If anyone wants to report success with a particular driver and multiple active statements I will collect them here. Also see L =head2 Why do I get "Datetime field overflow" when attempting to insert a date into Oracle? If you are using the Oracle or Microsoft ODBC drivers then you may get the following error when inserting dates into an Oracle database: [Oracle][ODBC]Datetime field overflow. (SQL-22008) If you do then check v$nls_parameters and v$parameter to see if you are using a date format containing the RR format. e.g., select * from v$nls_parameters where parameter = 'NLS_DATE_FORMAT' select * from v$parameter where name = 'nls_date_format' If you see a date format like 'DD-MON-RR' (e.g., contains an RR) then all I can suggest is you change the date format for your session as I have never been able to bind a date using this format. You can do this with: alter session set nls_date_format='YYYY/MM/DD' and use any format you like but keep away from 'RR'. You can find some test code in the file examples/rtcpan_28821.pl which demonstrates this problem. This was originally a rt.cpan issue which can be found at L. As an aside, if anyone is reading this and can shed some light on the problem I'd love to hear from you. The technical details are: create table rtcpan28821 (a date) insert into rtcpan28821 values('23-MAR-62') fails Looking at the ODBC trace, C returns: data type: 93, SQL_TYPE_TIMESTAMP size: 19 decimal digits: 0 nullable: 1 and DBD::ODBC calls SQLBindParameter with: ValueType: SQL_C_CHAR ParameterType: SQL_TYPE_TIMESTAMP ColumnSize: 9 DecimalDigits: 0 Data: 23-MAR-62 BufferLength: 9 =head2 Why do my SQL Server temporary objects disappear? If you are creating temporary objects (e.g., temporary tables) in SQL Server you find they have disappeared when you attempt to use them. Temporary objects only have a lifetime of the session they are created in but in addition, they cannot be created using prepare/execute. e.g., the following fails: $s = $h->prepare('select * into #tmp from mytable'); $s->execute; $s = $h->selectall_arrayref('select * from #tmp'); with "Invalid object name '#tmp'". Your should read L which basically says I. The proper way to avoid this is to use the C method but if you cannot do that then you need to add the L attribute to your prepare as follows: my $s = $h->prepare('select * into #tmp from mytable', { odbc_exec_direct => 1}); See L. =head2 Why cannot I connect to my data source on Windows 64? 
If you are running a 32bit Perl on a 64bit Windows machine you will need to be aware there are two ODBC administrators and you need to create your DSNs with the right one. The ODBC Administrator you get to from Control Panel, Administrative Tools, Data Sources is the 64bit one and data sources created here will not be visible or useable from 32bit applications. The ODBC administrator you need to use for 32bit applications can be found at X:\windows\syswow64\odbcad32.exe. You can find more about this than you'd probably care to know at http://www.easysoft.com/developer/interfaces/odbc/64-bit.html =head2 How do I use DBD::ODBC with web servers under Win32. =over 4 =item General Commentary re web database access This should be a DBI faq, actually, but this has somewhat of an Win32/ODBC twist to it. Typically, the Web server is installed as an NT service or a Windows 95/98 service. This typically means that the web server itself does not have the same environment and permissions the web developer does. This situation, of course, can and does apply to Unix web servers. Under Win32, however, the problems are usually slightly different. =item Defining your DSN -- which type should I use? Under Win32 take care to define your DSN as a system DSN, not as a user DSN. The system DSN is a "global" one, while the user is local to a user. Typically, as stated above, the web server is "logged in" as a different user than the web developer. This helps cause the situation where someone asks why a script succeeds from the command line, but fails when called from the web server. =item Defining your DSN -- careful selection of the file itself is important! For file based drivers, rather than client server drivers, the file path is VERY important. There are a few things to keep in mind. This applies to, for example, MS Access databases. 1) If the file is on an NTFS partition, check to make sure that the Web B user has permissions to access that file. 2) If the file is on a remote computer, check to make sure the Web B user has permissions to access the file. 3) If the file is on a remote computer, try using a UNC path the file, rather than a X:\ notation. This can be VERY important as services don't quite get the same access permissions to the mapped drive letters B, more importantly, the drive letters themselves are GLOBAL to the machine. That means that if the service tries to access Z:, the Z: it gets can depend upon the user who is logged into the machine at the time. (I've tested this while I was developing a service -- it's ugly and worth avoiding at all costs). Unfortunately, the Access ODBC driver that I have does not allow one to specify the UNC path, only the X:\ notation. There is at least one way around that. The simplest is probably to use Regedit and go to (assuming it's a system DSN, of course) HKEY_LOCAL_USERS\SOFTWARE\ODBC\"YOUR DSN" You will see a few settings which are typically driver specific. The important value to change for the Access driver, for example, is the DBQ value. That's actually the file name of the Access database. =back =head2 How do I connect without DSN The ability to connect without a full DSN was introduced in version 0.21. Example (using MS Access): my $DSN = 'driver=Microsoft Access Driver(*.mdb);dbq=\\\\cheese\\g$\\perltest.mdb'; my $dbh = DBI->connect("dbi:ODBC:$DSN", '','') or die "$DBI::errstr\n"; The above sample uses Microsoft's UNC naming convention to point to the MSAccess file (\\cheese\g$\perltest.mdb). 
The dbq parameter tells the access driver which file to use for the database. Example (using MSSQL Server): my $DSN = 'driver={SQL Server};Server=server_name;database=database_name;uid=user;pwd=password;'; my $dbh = DBI->connect("dbi:ODBC:$DSN") or die "$DBI::errstr\n"; =head2 Why do I get data truncated error from SQL Server when inserting with parameters? Please see "Why am I getting errors with bound parameters" below which collects all parameter issues together in one FAQ item. =head2 Why do I get invalid value for cast specification (22018) from SQL Server when inserting with parameters? Please see "Why am I getting errors with bound parameters" below which collects all parameter issues together in one FAQ item. =head2 Why do I get strange results with SQL Server and named parameters? If you are using a MS SQL Server driver and named parameters to procedures be very careful to use then in the same order they are defined in the procedure. i.e., if you have a procedure like this: create procedure test @param1 varchar(50), @param2 smallint as begin .. end then ensure if you call it using named parameters you specify them in the same order they are declared: exec test @param1=?,@param2=? and not exec test @param2=?,@param1=? The reason for this is that all SQL Server drivers we have seen describe procedures parameters in the order they are declared and ignore the order they are used in the SQL. If you specify them out of order DBD::ODBC will get details on p1 which are really for p2 etc. This can lead to data truncation errors and all sorts of other problems it is impossible for DBD::ODBC to spot or workaround. =head2 Why do I get "Numeric value out of range" when binding dates in Oracle? Also see "Why do I get "Datetime field overflow" when attempting to insert a date into Oracle?". Here is some example code binding dates; some work, some don't, see comments. use DBI; use strict; # table is "create table martin (a date, b int)" my $h = DBI->connect; $h->do(q{alter session set nls_date_format='DD-MON-YY'}); my $s = $h->prepare(q{select * from v$nls_parameters where parameter = 'NLS_DATE_FORMAT'}); $s->execute; print DBI::dump_results($s); my $date = '30-DEC-99'; my $dateodbc = qq/{ d '1999-12-30'}/; # the following works ok - resulting in 2099-12-30 being inserted $h->do(qq{insert into martin values ('$date', 1)}); # the following works resulting in 1999-12-30 being inserted $h->do(qq{insert into martin values ($dateodbc, 2)}); # fails eval { my $s = $h->prepare(q{insert into martin values(?,3)}); $s->bind_param(1, $date); # fails # Numeric value out of range: invalid character in date/time string (SQL-22003) $s->execute; }; # works resulting in 2099-12-30 being inserted eval { my $s = $h->prepare(q{insert into martin values(?,4)}); $s->bind_param(1, $date, DBI::SQL_VARCHAR); $s->execute; }; # works resulting in 1999-12-30 being inserted eval { my $s = $h->prepare(q{insert into martin values(?,5)}); $s->bind_param(1, $dateodbc); $s->execute; }; In general, when using an ODBC driver you should use the ODBC syntax for dates, times and timestamps as those are the only formats an ODBC has to support. In the above case with Oracle, the date parameter is described as a SQL_TYPE_DATE SQL type so by default DBD::ODBC binds your parameter as a SQL_TYPE_DATE. 
If you use '30-DEC-99' then that means the C type is SQL_CHAR and the SQL type is SQL_TYPE_DATE so the driver is forced to parse the date before sending it to Oracle (that would mean knowing what your NLS_DATE_FORMAT is and it would also mean knowing all the magic special characters Oracle can use to define date formats). If you override the bind type to SQL_VARCHAR then the driver sees SQL_CHAR => SQL_VARCHAR, nothing to do and hence Oracle itself does the translation - that is why the SQL_VARCHAR works. =head2 CWB0111 error with System i Access ODBC Driver The full error this relates to is: [unixODBC][IBM][System i Access ODBC Driver]Column 1: CWB0111 - A buffer passed to a system call is too small to hold return data (SQL-22018) The iSeries ODBC driver looks at your environment and decides that if you have UTF-8 set it will encode data returned from the database in UTF-8. e.g., LC_CTYPE=fr_FR.UTF-8 If you then select data from your database containing non-ASCII characters e.g., accented characters the iSeries ODBC driver will encode the data in UTF-8. UTF-8 encoding increases the size of strings containing characters with codes > 127. DBD::ODBC uses SQLDescribeCol and SQLColAttribute ODBC calls to work out the size of the columns you are retrieving and allocate space for them. As the ODBC API specifies the sizes returned are on bytes when the driver says a string column is N is size DBD::ODBC allocates N+1 (for NULL) bytes. If the driver then encodes N characters in UTF-8 the size will be too big to fit into DBD::ODBC's buffer and you will get the error above. This is most often seen with char(N) columns as the trailing spaces are returned by the driver so you are bound to overflow the buffer as soon as a non-ASCII characters is found. What are your possible solutions? You can attempt to trim the data to leave room for the encoding. e.g., RTRIM(column_name) in your select on char(N) columns but this is a poor choice and only going to work in a few circumstances. You can increase the sizes of your columns in the database but this is another hack. You can rearrange your SQL to cast the columns in question to larger types. Remove UTF-8 from your locale. This is the best solution as it is guaranteed to stop this error but if you have data which cannot be represented in 8 bit characters this won't help. =head2 "ConnectionWrite (send())" error and bind parameters The following error can happen when using more than 2097 bind parameters in a single query with SQL Server 2000: [Microsoft][ODBC SQL Server Driver][DBNETLIB]ConnectionWrite (send()). (SQL-01000) [state was 01000 now 08S01] [Microsoft][ODBC SQL Server Driver][DBNETLIB]General network error. Check your network documentation. (SQL-08S01) This error is most likely due to a bug in the Microsoft SQL Server ODBC driver as it fails for some versions of the driver and not others. It is known to fail for version 2000.85.1132.00 See bug report https://rt.cpan.org/Public/Bug/Display.html?id=49061 for more details. =head2 SQL query length limited to about 65500 characters in SQL Server 2000 When using bind parameters and a query longer than about 65500 characters the query will fail with some versions of the SQL Server 2000 ODBC driver. The error message from the server can vary. Below is an example: [Microsoft][ODBC SQL Server Driver][SQL Server]Invalid column name 'TES[...]P1I'. (SQL-42S22) [state was 42S22 now 42000] [Microsoft][ODBC SQL Server Driver][SQL Server]Statement(s) could not be prepared. 
(SQL-42000) Removing the use of binding will allow queries that are longer than 65500 characters. This bug is known to affect Microsoft SQL Server ODBC driver version 2000.85.1132.00. See bug report https://rt.cpan.org/Public/Bug/Display.html?id=49065 =head2 HY009, "invalid length or pointer" from SQLite See rt 52651 at http://rt.cpan.org/Public/Bug/Display.html?id=52651. During connection if you get a HY009 "invalid length or pointer" error it is a bug in SQLSetConnectAttr in SQLite and you will need a version at least 0.85pre1. =head2 Where do I get the latest MDAC Development Kit? MS keep moving this around. If you want to build DBD::ODBC yourself from source you'll need the latest Microsoft Data Access Components (MDAC) Software Development Kit. You can get it as part of the Platform Development Kit, with some of the Visual Studio versions and occasionally from: http://msdn.microsoft.com/en-us/data/aa937730.aspx where in April 2010 it listed the "Microsoft Data Access Componetns (MDAC) 2.8 Software Development Kit. =head2 Why does DBD::ODBC fail to compile with missing definitions for SQLLEN/SQLULEN? This happens because Microsoft changed their headers to add SQLLEN/SQLULEN types and your C headers are probably out of date. As DBD::ODBC needs to use these types you'll need an updated MDAC Development Kit. See " Where do I get the latest MDAC Development Kit?". =head2 Why do I get errors with bound parameters and MS SQL Server? See the question "Why do I get data truncated error from SQL Server when inserting with parameters?" above. These errors are often because of bugs in the MS SQL Server ODBC driver in its SQLBindParameter implementation and can be worked around by specifying a type at bind time. e.g., Instead of: my $s = prepare(q/some sql with parameters/); $s->execute($param1, $param2); try: my $s = prepare(q/some sql with parameters/); $s->bind_param(1, $param1, {TYPE => SQL_VARCHAR}); $s->bind_param(2, $param2, {TYPE => SQL_VARCHAR}); $s->execute; See https://connect.microsoft.com/SQLServer/feedback/details/527188/paramater-datatype-lookup-returns-incorrectly and rt ticket 50852. =head2 Why does my script pause for a while whenever my statement handle is destroyed (goes out of scope)? The symptom is that sometimes when your statement handle goes out of scope and is hence destroyed your script pauses for a while. If you are using MS SQL Server and certain MS SQL Server ODBC Drivers this can happen when you issue a select which would return a lot of rows but you don't fetch them all. The problem is that the TDS protocol (normally, without Multiple Active Statement support, or MARS) sends all the result-set down the socket until it is consumed by the client end. When your statement handle is destroyed with pending results the ODBC Driver needs to read all the results to clear the socket. In reality MS SQL Server will only write so many rows at a time to the socket depending on its buffer size and will occasionally look at the socket for new requests so it is possible for ODBC Drivers which support SQLCancel to reduce the number of rows sent by using DBI's cancel method. In this way the statement destruction is speeded up since fewer rows you don't need are sent. See DBI's cancel method and if you destroy a statement handle with pending results, call cancel before destruction. However, you are best not selecting rows you have no intention of retrieving. See cancel_big_fetch.pl in the DBD::ODBC examples dir. 
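A minimal sketch of that (assuming your ODBC driver supports SQLCancel) is:

  # Sketch only: fetch the rows you want then cancel the rest so statement
  # destruction does not have to drain the remaining result-set.
  my $sth = $dbh->prepare('select * from big_table');
  $sth->execute;
  my @row = $sth->fetchrow_array;   # take only what you need
  $sth->cancel;                     # discard the pending rows
  # $sth can now go out of scope without the long pause
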
NOTE: more recent MS SQL Server drivers are better in this respect and sometimes the test script cancel_big_fetch.pl shows no difference. =head2 Why does my backup/restore/some_other_procedure in MS SQL Server not complete? MS SQL Server batches up results in a procedure. A result may be the output of a print or a select or in some cases even an insert/update (see SET NOCOUNT ON|OFF). If you attempt to call a procedure using the C method and it outputs results (e.g., a print statement saying the % completed) the procedure probably will not fully complete. Instead you should do the following: $sth-prepare(call to my procedure); $sth->execute; do { while (my @row = $sth->fetchrow_array()) { # do stuff here } } while ($sth->{odbc_more_results}); # do not forget to check $sth->err here if not using RaiseError as # the outer while loop will stop when there are no more results OR # if an error occurs. =head2 Why do I get "The data types ntext and varchar are incompatible in the equal to operator"? Or "The data types ntext and nvarchar(max) are incompatible in the equal to operator". MS SQL Server does not like equality comparisons with ntext columns. You can get this error without using any Perl or DBD::ODBC simply by doing: select * from mytable where ntext_column = 'hello' You unfortunately need to change your SQL to: select * from mytable where CAST(ntext_column AS nvarchar(max)) = 'hello' =head2 Why are my integers returned as decimals? If you are using the MS SQL Server ODBC driver and get integers/booleans back as apparently decimals e.g. 0.00 instead of 0 and integer primary keys as nn.nn you've probably either: =over =item enabled regional settings in the ODBC DSN setup (called "use regional settings with outputting..." =item added "Regional=Yes" to your connection string. The MS SQL Server ODBC Driver regional settings are massively flawed and break lots of applications - turn it off. =back =head2 Why does Connect call fail on Ubuntu with a "undefined symbol SQLxxx" error? Sometimes SQLxxx is SQLAllocHandle or SQLFetch but it could almost be any ODBC API. Did you build DBD::ODBC against iODBC? Some versions of Ubunutu Linux seem to install the libiodbc shared object without a libiodbc.so symbolic link. The key giveaway when you build DBD::ODBC is a warning like this: Note (probably harmless): No library found for -liodbc.so You can fix this manually by creating a symbolic link something like this: sudo ln /usr/lib/libiodbc.so.2 /usr/lib/libiodbc.so but to be honest this just moved the problem on for me and I've never got iODBC working on Ubuntu. I suggest you remove iODBC and installing the unixodbc, unixodbc-dev and unixodbc-bin packages. =head2 Why does Connect fail with "Option type out of range" You are probably setting an ODBC connection option like odbc_SQL_ROWSET_SIZE in the connect method. The problem is that some options can be set in ODBC with SQLSetConnectOption but not retrieved (silly but that is how some of the driver managers work). In the past this did not matter but a small bug in DBI introduced years ago causes attribute values to be fetched. You need to get a DBI release newer than 1.615 or revision 14463 from the DBI subversion repository. 
The change was: --- dbi/trunk/DBI.pm (original) +++ dbi/trunk/DBI.pm Thu Oct 7 02:45:05 2010 @@ -717,7 +717,8 @@ $dbh->{$a} = delete $apply->{$a}; } while ( my ($a, $v) = each %$apply) { - eval { $dbh->{$a} = $v } or $@ && warn $@; + eval { $dbh->{$a} = $v }; # assign in void context to avoid re-FETCH + warn $@ if $@; } } =head2 Why do I get undefined symbol my_snprintf? It is my fault. Basically, I changed DBD::ODBC to use Devel::PPPort's my_snprintf instead of sprintf (for safety) but because of the way DBI includes ppport.h (as dbipport.h) I cannot override it in DBD::ODBC. This should now be fixed in DBI but I cannot retrofit it so if you get this error you need to upgrade DBI to at least version 1.609. Sorry, some things are as they are. From DBD::ODBC 1.26_1 a requirement is DBI 1.609 so this should not be an issue but the 1.24 and 1.25 series DBD::ODBC did not have this requirement. =head2 Why can I appear to insert more than 255 chrs into a MS Access text column but when I read them back I only get 255 chrs? The simple answer is that MS Access only supports 255 characters in a text column. You can see this if you create a table with a text column then examine the PRECISION attribute (see DBI) and it returns 255. Unfortunately some versions of the MS Access ODBC driver silently truncate the data instead of issuing a data truncation error so you are not aware of the truncation until you read it back. Someone pointed out you can insert more than 255 characters into a MS Access text column with DBD::ADO but I believe if you look at the column types after creating the table with DBD::ADO the text column is really a memo column. =head2 Why am I getting errors with bound parameters? There are various problems with parameter binding in ODBC Drivers most of them down to bugs in the ODBC drivers. I created this FAQ to try and bring them all together in one place and other FAQ entries point at this one as a number of them boil down to a single problem. DBD::ODBC used to (many many versions ago) bind all parameters as SQL_VARCHAR and this mostly works because a SQL_VARCHAR can almost always be converted to any other SQL type. However, because of bugs in some ODBC drivers, additional SQL types which work in mysterious ways (like varchar(max) in MS SQL Server) and dates, DBD::ODBC was changed to ask the ODBC Driver (C) about parameters before binding them. There are a few things to note first: =over =item default bind type Some ODBC Drivers do not support C (e.g. freeTDS) and in those cases DBD::ODBC reverts to its old behaviour of binding everything as a SQL_VARCHAR or SQL_WVARCHAR (UNICODE build). In some rare cases the default bind type might not be what you want. In addition, if C is supported but fails the default bind type is used. =item overriding the default bind type You can always override the default bind type used when C is not supported using odbc_default_bind_type. =item forcing a bind type Sometimes the ODBC Driver can support C but get the answer wrong e.g., "select myfunc(?) where 1 = 1)" often causes a problem because the parameter does not correspond to a real column in a table. In these cases you can use odbc_force_bind_type to stop DBD::ODBC calling C and use the specified type instead. However, adding TYPE => xxx to the bind_param call is nearly always better (as it is more specific) and always overrides odbc_force_bind_type. 
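As an illustration of the two approaches (the myfunc function, mytable and the choice of SQL_VARCHAR here are only placeholders):

  use DBI qw(:sql_types);

  # sketch: stop DBD::ODBC calling SQLDescribeParam on this handle and
  # bind every parameter as SQL_VARCHAR
  $dbh->{odbc_force_bind_type} = SQL_VARCHAR;

  # usually better: name the type on the individual parameter
  my $sth = $dbh->prepare('select myfunc(?) from mytable');
  $sth->bind_param(1, $value, { TYPE => SQL_VARCHAR });
  $sth->execute;
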
=item specifying a bind type on the bind_param call In all cases this overrides anything else that DBD::ODBC might do and bare in mind parameter bind types set in this way are "sticky" (see DBI). This is usually the best method unless you prefer to use a cast in your SQL. =back Now to the specific problems: =over =item data truncated error DBD::ODBC attempts to use the ODBC API C to obtain information about parameters in parameterised SQL. e.g., insert into mytable (column1) values(?) The C is a parameter marker. You supply the parameter value (in this case parameter 1) with a call to the C method or by adding the parameter to the C method call. When DBD::ODBC sees the parameter marker in the SQL it will call C to obtain information about the parameter size and type etc (assuming your ODBC driver supports C). When you call C in the MS SQL Server ODBC driver the driver will scan your SQL attempting to discover the columns in your database the parameters align with. e.g., in the above case the parameter to be bound is linked with "column1" so C should return information about "column1". The SQL Server ODBC driver finds information about "column1" (in this example) by creating SQL such as: select column1 from mytable where 1 = 2 then looking at the column details. Unfortunately, some SQL confuses SQL Server and it will generate SQL to find out about your parameters which examines the wrong columns and on rare occasions it may even generate totally incorrect SQL. The test case F demonstrates a couple of these. The upshot of this is that DBD::ODBC is sometimes lied to about parameters and will then bind your parameters incorrectly. This can lead to later errors when C is called. This happens most commonly when using parameters in SQL with sub-selects. For example: create table one (a1 integer, a2 varchar(10)) create table two (b1 varchar(10), b2 varchar(20)) insert into one values(1, 'aaaaaaaaaa') insert into two values('aaaaaaaaaa','bbbbbbbbbbbbbbbbbbbb') select b1, (select a2 from one where a2 = b1) from two where b2 = ? param 1 bound as 'bbbbbbbbbbbbbbbbbbbb' Clearly in this example, the one and only parameter is for two.b2 which is a varchar(20) but when SQL Server rearranges your SQL to describe the parameter it issues: select a2 from one where 1 = 0 and DBD::ODBC is told the parameter is a VARCHAR(10). In DBD::ODBC 1.17 this would then lead to a data truncation error because parameter 1 would be bound as 'bbbbbbbbbbbbbbbbbbbb' but with a column size of 10 as that is what C returned. DBD::ODBC 1.17_1 (and later) works around this problem for VARCHAR columns because it is obvious a VARCHAR parameter of length 20 cannot have a column size of 10 so the column size is increased to the length of the parameter. =item invalid value for the cast specification (22018) See the previous item as this follows on from that. See L on the microsoft web site for a bug you may have hit. In Perl the most common reason for this is that you have bound column data in SQL which does not match the column type in the database and the ODBC driver cannot perform the necessary conversion. DBD::ODBC mostly binds all column data as strings and lets the ODBC driver convert the string to the right column type. If you supply a string which cannot be converted to the native column type you will get this error e.g., if you attempt to bind a non-datetime string to a datetime column or a non-numeric string to a numeric column. 
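A contrived sketch of the simple case (the table, column and data here are invented and adate is assumed to be a datetime column):

  my $sth = $dbh->prepare('insert into mydates (adate) values (?)');
  $sth->execute('not a date');                    # driver cannot convert - 22018
  $sth->execute(q/{ts '2011-03-21 00:00:00'}/);   # ODBC timestamp escape converts cleanly
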
A more difficult error (from that above in the previous item) can occur when SQL Server describes the parameter as totally the wrong type. The first example in F demonstrates this. SQL Server describes a VARCHAR parameter as an integer which DBD::ODBC has little choice to believe but when something like 'bbbbbbbbbb' is bound as an integer, SQL Server will then return an error like "invalid value for cast specification". The only way around this is to specifically name the parameter type. e.g., create table one (a1 integer, a2 varchar(20)) create table two (b1 double precision, b2 varchar(8)) insert into one values(1, 'aaaaaaaaaa') insert into two values(1, 'bbbbbbbb') select b1, ( select a2 from one where a1 = b1 ) from two where b2 = ? param 1 bound as 'bbbbbbbbbb' Clearly parameter 1 is a varchar(8) but SQL Server rearranges the SQL to: select a1 from one where 1 = 2 when it should have run select b2 from two where 1 = 2 As a result parameter 1 is described as an integer and this leads to the problem. To workaround this problem you would need to bind parameter 1 naming the SQL type of the parameter using something like: use DBI qw(:sql_types); bind_param(1, 'bbbbbbbbbb', SQL_VARCHAR); as omitting SQL_VARCHAR will cause DBD::ODBC to use the type C returned. See https://connect.microsoft.com/SQLServer/feedback/details/527188/paramater-datatype-lookup-returns-incorrectly and rt ticket 50852. =item problems with named parameters If you are using a MS SQL Server driver and named parameters to procedures be very careful to use then in the same order they are defined in the procedure. i.e., if you have a procedure like this: create procedure test @param1 varchar(50), @param2 smallint as begin .. end then ensure if you call it using named parameters you specify them in the same order they are declared: exec test @param1=?,@param2=? and not exec test @param2=?,@param1=? The reason for this is that all SQL Server drivers we have seen describe procedures parameters in the order they are declared and ignore the order they are used in the SQL. If you specify them out of order DBD::ODBC will get details on p1 which are really for p2 etc. This can lead to data truncation errors and all sorts of other problems it is impossible for DBD::ODBC to spot or workaround. =item Argument data type varchar is invalid for argument N of xxx function e.g., [SQL Server]Argument data type varchar is invalid for argument 2 of dateadd function. Some functions need specific argument types and as explained above some drivers have a lot of difficulties working out parameter types in function calls. In this example (also from the DBIx::Class test code) the SQL was like: SELECT DATEADD(hour, ?, me.date_created) FROM mytable me where me.id = ? The second argument to dateadd needs to be an integer. There are 2 ways around this: =over =item bind parameter 1 as an SQL_INTEGER =item CAST(? as integer) =back =item other bugs in ODBC Drivers with parameter support See the question "Why do I get data truncated error from SQL Server when inserting with parameters?" above. These errors are often because of bugs in the MS SQL Server ODBC driver in its SQLBindParameter implementation and can be worked around by specifying a type at bind time. 
e.g., Instead of: my $s = prepare(q/some sql with parameters/); $s->execute($param1, $param2); try: my $s = prepare(q/some sql with parameters/); $s->bind_param(1, $param1, {TYPE => SQL_VARCHAR}); $s->bind_param(2, $param2, {TYPE => SQL_VARCHAR}); $s->execute; See https://connect.microsoft.com/SQLServer/feedback/details/527188/paramater-datatype-lookup-returns-incorrectly and rt ticket 50852. =item example of where overriding parameter type is required Here is an example sent to me from someone (wesm) using DBIx::Class illustrating a problem with bound parameters in a complex bit of SQL and date columns: use DBI qw(:sql_types); use strict; use warnings; my $h = DBI->connect(); eval{ $h->do(q/DROP TABLE odbctest/) }; $h->do(q/CREATE TABLE odbctest ( id integer NOT NULL IDENTITY (1,1), name nvarchar(50) NULL, adate date NULL )/); my $s = $h->prepare(q/ set identity_insert odbctest on; insert into odbctest (id, name, adate) values (?,?,?); set identity_insert odbctest off; select scope_identity(); /); my $bug = undef; # fails #= '2011-03-21'; # works my @values = ( [ 1000, 2000, ], [ 'frew', 'wes', ], [ $bug, '2009-08-10', ], ); my $i = 1; my $tuple_status; $s->bind_param_array($i++, $_) for @values; $s->execute_array({ArrayTupleStatus => $tuple_status}); $s = $h->prepare(q/select * from odbctest where id = ?/); foreach (1000, 2000) { $s->execute($_); print DBI::dump_results($s); } When I first ran this with SQL Server native client (pre native client 10) on Windows I got DBD::ODBC::st execute_array failed: [Microsoft][SQL Native Client]Syntax error, permission violation, or other nonspecific error (SQL-42000) [err was 1 now 2000 The call to C fails because the driver cannot rearrange the SQL into a select allowing it to identify the column. DBD::ODBC then falls back on the default parameter bind type and it still fails. When you switch to the native client 10 driver (newer) it fails with optional feature not implemented. Now I'm only guessing here but the date column type was added in SQL Server 2008 and I'm guessing when it was added they forgot to add something in the driver for full date support. If you use native client 10 and change the insert code to: my @values = ( [ 1000, 2000, ], [ 'frew', 'wes', ], [ $bug, '2009-08-10', ], ); my @types = ( SQL_VARCHAR, SQL_VARCHAR, SQL_DATE); my $i = 1; my $tuple_status; $s->bind_param_array($i++, $_, {TYPE => $types[$i-2]}) for @values; $s->execute_array({ArrayTupleStatus => $tuple_status}); you workaround the problem. It is possible when you read this now that DBIx::Class has worked around this problem. =back =head2 Why do I get data truncated errors with type_info and Firebird ODBC Driver? The Firebird ODBC driver from the open source Firebird project version 02.01.0100 (as reported in ODBC via SQLGetInfo) seems to report the length of the TYPE_NAME field for types -9 'VARCHAR(x) CHARACTER SET UNICODE_' and -10 'BLOB SUB_TYPE TEXT CHARACTER SET ' as 34 but then attempt to write 36 characters to them. Unfortunately this is an error via DBI and due to the way type_info is implemented (where it retrieves all the types the first time you query any type) it stops you querying any type even those which are not -9 or -10. Unless you need the TYPE_NAME field this should not matter to you in which case you can set LongTruncOk before retrieving type_info: my $ti; { local $dbh->{LongTruncOk} = 1; $ti = $dbh->type_info_all; } You will need to do something similar if you use the statements attributes like TYPE the first time you use them. 
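A sketch of the equivalent workaround for the statement attributes (assuming, as with type_info_all above, that localising LongTruncOk on the database handle before the statement is prepared is enough):

  my $types;
  {
      local $dbh->{LongTruncOk} = 1;   # sketch: tolerate the truncated TYPE_NAME
      my $sth = $dbh->prepare('select * from mytable');
      $sth->execute;
      $types = $sth->{TYPE};           # first use of the statement attributes
  }
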
I reported this issue at http://tracker.firebirdsql.org/browse/ODBC-122. =head2 Why do I get "symbol lookup error for SQLAllocHandle" on Ubuntu/Debian? You've probably got iODBC installed and the Makefile.PL found it before unixODBC but on some Ubuntu and Debian distributions iODBC is not installed with the symbolic link libiodbc.so e.g., /usr/lib contains lrwxrwxrwx 1 root root 22 2011-01-04 20:00 libiodbcinst.so.2 -> libiodbcinst.so.2.1.18 -rw-r--r-- 1 root root 67140 2009-11-11 00:42 libiodbcinst.so.2.1.18 lrwxrwxrwx 1 root root 18 2011-01-04 20:00 libiodbc.so.2 -> libiodbc.so.2.1.18 -rw-r--r-- 1 root root 305108 2009-11-11 00:42 libiodbc.so.2.1.18 but no libiodbc.so. You could create the symbolic link: cd /usr/lib sudo ln -s libiodbc.so.2 libiodbc.so however, you'll probably have other problems so better to use unixODBC. Ensure you've installed unixodbc and unixodbc-dev packages and re-run Makefile.PL with the -x argument to prefer unixODBC. =head2 How do I set the application and workstation names for MS SQL Server? MS SQL Server supports 2 additional connection attributes which you can use to set application name and the workstation name: B specifies the application name recorded in the program_name column in master.dbo.sysprocesses. B sets the workstation name recorded in the hostname column in master.dbo.sysprocesses. To set these add these attributes to the call to DBIs connect like this: my $h = DBI->connect('dbi:ODBC:DSN=mydsn;APP=appname;WSID=wsname', 'dbuser', 'dbpass'); =head2 Why do I get "The specified DSN contains an architecture mismatch between the Driver and Application" on Windows? You've got a 64 bit Windows. Your attempting to connect to a SYSTEM DSN. You are trying to connect to a 64bit SYSTEM DSN from a 32 bit application or vice versa. See my initial experience http://www.martin-evans.me.uk/node/81. More confusing is if you use the data_sources method, that calls SQLDataSources and the list returned matches the architecture of your Perl binary and yet when you attempt to connect to a DSN for the wrong architecture you get this error instead of the more sensible (and usual) data source not found. NOTE: User DSNs don't exhibit this - they just seem to pick the right driver. See also http://www.easysoft.com/developer/interfaces/odbc/64-bit.html =head2 Why does my transaction get committed when I disable AutoCommit? If you are doing something like this: {local $h->{AutoCommit} = 0; $h->do(q/insert into mje values(1)/); } then what really happens is AutoCommit is disabled at the start of the block and when the block is exited AutoCommit is re-enabled. In ODBC enabling AutoCommit will commit any outstanding transaction. Don't do this. Instead, either rollback or commit at the end of the block or leave AutoCommit alone and call begin_work/commit/rollback yourself in the block. =head2 Why do I get the wrong row count from execute_for_fetch? Some drivers don't return the correct value from SQLRowCount when binding arrays of parameters. e.g., freeTDS 8 and 0.91 seems to return a 1 for each batch. e.g., if you run a SQL insert to insert 15 rows and pass an array of 15 rows to execute_array with the default array size of 10 it takes 2 batches to execute all the parameters and freeTDS will return 1 row affected for each batch hence returns 2 instead of 15. See rt 75687. =head2 Why are my pound signs (£), dashes and quotes (and other characters) returned garbled The first question in response is why do you think what you got back was incorrect? 
Did you print the data to a terminal and it looks wrong, or perhaps sent it to a browser in a piece of CGI or even wrote it to a file? The mantra you need to stick to is decode all input to Perl and encode all output but DBD::ODBC does the decoding of data retrieved from the database for you. The classic case I keep seeing I've repeated here because it illustrates the most common problem. Database is MS SQL Server, data is viewed in the management console and looks good but when retrieved via DBD::ODBC it looks wrong. The most common cause of this is the data you've retrieved is stored as unicode in Perl and you output it to somewhere without encoding it first with an encoding appropriate for the output e.g., you printed it from a windows terminal without setting the STDOUT encoding to cp1252 (or whatever your codepage in your terminal is). The first thing I'd suggest is to print the data with Data::Dumper and if any of the output contains \x{NNNN} your data is unicode (there are other ways like using DBI's data_string_desc utility method or Encode's is_utf8). Bear in mind that in a unicode build of DBD::ODBC (the default on Windows) all string data is retrieved as unicode. When you output your unicode data anywhere you need to encode it with Encode::encode e.g., binmode(STDOUT, ":encoding(cp1252)"); Just because you think you are working in a single codepage does not mean the data you retrieve will be returned as single byte characters in that codepage. DBD::ODBC (in a unicode build) retrieves all string data as wide (unicode) characters and most ODBC drivers will convert the codepage data returned by the database to unicode. For example, your column is windows-1252 codepage and contains a euro symbol which is character 0xA3. When retrieved by DBD::ODBC, the ODBC driver will convert this to unicode character 0x20ac. If you output this without encoding you'll likely see rubbish. If you are absolutely sure you are using a single code page and don't want to be bothered with unicode, look up the odbc_old_unicode attribute but better still, rebuild DBD::ODBC without unicode support using: perl Makefile.PL -nou =head2 Does DBD::ODBC support the new table valued parameters? Not yet. Patches welcome. =head2 Why do I get "COUNT field incorrect or syntax error (SQL-07002)"? In general this error is telling you the number of parameters bound or passed to execute does not match the number of parameter markers in your SQL. However, this can also happen if you attempt to use too many parameters. For instance, for MS SQL Server (http://msdn.microsoft.com/en-us/library/ms143432.aspx) the maximum is 2100. =head2 Why are my column names truncated to 30 characters when using freeTDS? You should note this is only an observed answer. The person who reported this to me was using MS SQL Server 2008. If he set TDS protocol 6.0, 9.0 or 10.0 his column names were truncated to 30 charatcers. If he specified TDS protocol 7.0 or 8.0 his column names were not truncated. We guessed his server did not support protocols 9.0 or 10.0 and fall back to 6.0 where column names are restricted to 30 characters. =head2 Why are my doubles truncated from MS Access DB? If you have a double column in your MS Access DB and the retrieved values are truncated you have probably hit a known (and fixed) bug in the MS Access ODBC driver. 
If you are absolutely sure you are using a single code page and don't want to be bothered with unicode, look up the odbc_old_unicode attribute but better still, rebuild DBD::ODBC without unicode support using:

  perl Makefile.PL -nou

=head2 Does DBD::ODBC support the new table valued parameters?

Not yet. Patches welcome.

=head2 Why do I get "COUNT field incorrect or syntax error (SQL-07002)"?

In general this error is telling you the number of parameters bound or passed to execute does not match the number of parameter markers in your SQL. However, this can also happen if you attempt to use too many parameters. For instance, for MS SQL Server (http://msdn.microsoft.com/en-us/library/ms143432.aspx) the maximum is 2100.

=head2 Why are my column names truncated to 30 characters when using freeTDS?

You should note this is only an observed answer. The person who reported this to me was using MS SQL Server 2008. If he set TDS protocol 6.0, 9.0 or 10.0 his column names were truncated to 30 characters. If he specified TDS protocol 7.0 or 8.0 his column names were not truncated. We guessed his server did not support protocols 9.0 or 10.0 and fell back to 6.0, where column names are restricted to 30 characters.

=head2 Why are my doubles truncated from MS Access DB?

If you have a double column in your MS Access DB and the retrieved values are truncated, you have probably hit a known (and fixed) bug in the MS Access ODBC driver. Typical truncation looks like this:

  8.93601020357839E-06 returned as E-6

If you have the Microsoft Access 2010 accdb ODBC driver (v14) or older then try upgrading to the 2013 (v15) driver, as my experiments showed that fixed the issue. Search for "Microsoft Access 2013 redistributable engine" and download the appropriate version for your operating system.

=head2 Why is my (long)binary data inserted into MS Access incorrect?

A typical example of this is trying to insert binary data into a MS Access table using parameters and the data ends up full of null bytes. The MS Access ODBC driver does not support the ODBC API SQLDescribeParam so DBD::ODBC has no idea what parameter type to use when binding placeholders. By default, DBD::ODBC falls back in this situation to the SQL_CHAR type. However, when you bind binary data as SQL_CHAR with the MS Access ODBC Driver the data stored in your DB will no longer be the data you expect, as the driver translates your data, bound as a string, to binary.

The way around this is to bind the parameter specifying a binary type e.g.,

  $sth->bind_param(1, undef, SQL_LONGVARBINARY);

or one of the other binary types. Note that you don't actually have to pass the parameter value into bind_param, as parameter types are sticky, so (as in this example) you can tell DBD::ODBC to use a different type in the bind_param call but still go on to pass the parameter into the execute method.

=head1 AUTHOR

Parts of this document were written by Tim Bunce, Jeff Urlwin and Martin J. Evans.

=head1 LICENSE

This library is free software; you can redistribute it and/or modify it under the same terms as Perl itself, either Perl version 5.8.7 or, at your option, any later version of Perl 5 you may have available.

=cut
DBD-ODBC-1.61/TO_DO0000644000175000017500000001147512513424262012745 0ustar martinmartinuse strict;

=head1 NAME

DBD::ODBC::TO_DO - Things to do in DBD::ODBC

As of $LastChangedDate: 2010-10-08 17:00:31 +0100 (Fri, 08 Oct 2010)$

$Revision: 10667 $

=cut

=head1 Todo

Better/more tests on multiple statement handles which ensure the correct number of rows

Better/more tests on all queries which ensure the correct number of rows and data

Better tests on SQLExecDirect/do

Keep checking Oracle's ODBC drivers for Windows to fix the Date binding problem

There is a Columns private ODBC method which is not documented.

Add support for sending lobs in chunks instead of all in one go. Although DBD::ODBC uses SQLParamData and SQLPutData internally they are not exposed, so anyone binding a lob has to have all of it available before it can be bound.

Why does level 15 tracing of any DBD::ODBC script show a lot of these:

  !!DBD::ODBC unsupported attribute passed (PrintError)
  !!DBD::ODBC unsupported attribute passed (Username)
  !!DBD::ODBC unsupported attribute passed (dbi_connect_closure)
  !!DBD::ODBC unsupported attribute passed (LongReadLen)

Add a perlcritic test - see DBD::Pg

Anywhere we are storing a value in an SV that we didn't create (and thus might have magic) we should probably set magic.

Add a test for ChopBlanks and unicode data

Add some private SQLGetInfo values for whether the SQL_ROWSET_SIZE hack works etc.

How can you tell if a driver supports MARS_CONNECTION? It might be possible to detect MARS capability with SS_COPT_MARS_ENABLED.

Bump requirement to Test::Simple 0.96 so we can use subtest, which is really cool, and reorganise tests to use it. 0.96, because it seems to be the first really stable version of subtest.

Add more Oracle-specific tests - like calling functions/procedures and in/out params.
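Such a test might look like the following minimal sketch (the procedure name and parameter size are hypothetical and not part of the current test suite):

  # call a stored procedure with one input and one output parameter
  my $sth = $dbh->prepare(q/{call my_proc(?, ?)}/);
  $sth->bind_param(1, 42);                  # input parameter
  my $out;
  $sth->bind_param_inout(2, \$out, 100);    # output parameter, max length 100
  $sth->execute;
  print "procedure returned: $out\n";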
Download rpm package from here -> http://download.opensuse.org/repositories/devel:/languages:/perl/openSUSE_11.4/src/ and see what changes they are making (especially Makefile.PL) to see if we might need to include them. DRIVER and DSN in ODBC connection attributes should be case insensitive and they are not - they are strcmped against DRIVER/DSN - check! Some people still have problems with iODBC being picked up before unixODBC. Various ideas from irc: mje: what does undefined symbol: SQLGetFunctions mean? mje: do I have iodbc conflicting again? <@Caelum> yeah I think that's it perl Makefile.PL -x * gfx is now known as gfx_away mje: kinda sucks I have to do that every time, every auto-upgrade borks it can't you just uninstall iODBC is this OSX? <@Caelum> it's Debian, I think I have iODBC because amarok needs it oh, so use -x mje: what if you just compile a quick program in the Makefile itself, and determine whether you need -x or not? <@ribasushi> I've seen other things to that - e.g. Time::HiRes could do, I guess I could revisit this - problem was Jens (I think) sent me a patch to prefer iODBC over unixODBC and told me it was required for something - can't remember right now otherwise, I'd just change the default to unixODBC then iODBC <@Caelum> maybe an env var in addition to -x? then I could just put it in my .zshrc like PERL_DBD_ODBC_PREFER_UNIXODBC yeah, could do that too mje: the point is you can't really "prefer" one or the other - you need to check for a specific -dev existence, instead of assuming it's there if the package itself is there but iODBC and unixODBC dev packages both provide sql.h etc you should see the code I already have to try and fathom this out - it is massive mje: by check I meant - compile something small and quick to run some unixODBC's don't include odbc_config, some do, some do but are too old and don't have switch fred to compile correctly I need to find odbc_config or iodbc_config so catch 22 <@ribasushi> but here's the kicker - you compile "something" - if it works - you found things correctly <@ribasushi> if the compile/execution fails - you "found" wrong I don't think it is that simple - I need to know it is iODBC or not and a whole load of other things there is a lot of history in the tests for supporting older systems <@ribasushi> https://metacpan.org/source/ZEFRAM/Time-HiRes-1.9724/Makefile.PL#L334 mje: ^^ I was referring to stuff like this <@ribasushi> without changing the heuristics at all, simply looping the whole thing with different params depending on the config outcome <@ribasushi> it's ugly, but can end up rather effective I'll look at it again - added to TO_DO - where does try_compile_and_link come from? mje: it's all part of this makefile, it's a good read rather clean ignore - sorry Jens suggested Config::AutoConf Cancel is not documented ODBC 3.8: http://msdn.microsoft.com/en-us/library/ee388581%28v=vs.85%29.aspxDBD-ODBC-1.61/dbdimp.h0000644000175000017500000003177412554127604013537 0ustar martinmartin/* * Copyright (c) 1997-2001 Jeff Urlwin * portions Copyright (c) 1997 Thomas K. Wenrich * portions Copyright (c) 1994,1995,1996 Tim Bunce * portions Copyright (c) 1997-2001 Jeff Urlwin * portions Copyright (c) 2001 Dean Arnold * portions Copyright (c) 2007-2013 Martin J. Evans * * You may distribute under the terms of either the GNU General Public * License or the Artistic License, as specified in the Perl README file. 
* */ /* some constants for driver specific types */ #define MS_SQLS_XML_TYPE -152 typedef struct imp_fbh_st imp_fbh_t; /* This holds global data of the driver itself. */ struct imp_drh_st { dbih_drc_t com; /* MUST be first element in structure */ SQLHENV henv; int connects; /* connect count */ }; /* Define dbh implementor data structure This holds everything to describe the database connection. */ struct imp_dbh_st { dbih_dbc_t com; /* MUST be first element in structure */ SQLHENV henv; /* copy from imp_drh for speed */ SQLHDBC hdbc; char odbc_ver[20]; /* ODBC compat. version for driver */ SQLSMALLINT max_column_name_len; /* flag to ignore named parameters */ int odbc_ignore_named_placeholders; /* flag to set default binding type (experimental) */ SQLSMALLINT odbc_default_bind_type; /* force bound parameters to be this type */ SQLSMALLINT odbc_force_bind_type; /* flag to see if SQLDescribeParam is supported */ int odbc_sqldescribeparam_supported; /* flag to see if SQLMoreResults is supported */ int odbc_sqlmoreresults_supported; /* flag to work around SQLServer bug and defer binding until last possible moment - execute instead of bind time. Should only be set for SQL Server currently. The problem was that SQL Server was not handling binding an undef then binding the real value. This happened with older SQLServer 2000 drivers on varchars and is still happening with date */ int odbc_defer_binding; /* force rebinding the output columns after each execute to resolve some issues where certain stored procs can return multiple result sets */ int odbc_force_rebind; SQLINTEGER odbc_query_timeout; /* point at which start using SQLPutData */ IV odbc_putdata_start; /* whether built WITH_UNICODE */ int odbc_has_unicode; /* flag to set asynchronous execution */ int odbc_async_exec; /* flag for executing SQLExecDirect instead of SQLPrepare and SQLExecute. Magic happens at SQLExecute() */ int odbc_exec_direct; /* flag indicating if we should pass SQL_DRIVER_COMPLETE to SQLDriverConnect */ int odbc_driver_complete; /* used to disable describing paramters with SQLDescribeParam */ int odbc_describe_parameters; /* flag to store the type of asynchronous execution the driver supports */ SQLUINTEGER odbc_async_type; SV *odbc_err_handler; /* contains the error handler coderef */ /* The out connection string after calling SQLDriverConnect */ SV *out_connect_string; /* default row cache size in rows for statements */ int RowCacheSize; /* if SQL_COLUMN_DISPLAYS_SIZE or SQL_COLUMN_LENGTH are not defined or * SQLColAttributes for these attributes fails we fallback on a default * value. */ SQLLEN odbc_column_display_size; /* Some databases (like Aster) return all strings UTF-8 encoded. * If this is set (1), SvUTF8_on() will be called on all strings returned * from the driver. */ int odbc_utf8_on; /* save the value passed to odbc_SQL_ROWSET_SIZE so we can return * it without calling SQLGetConnectAttr because some MS driver * managers (e.g., since MDAC 2.7 and on 64bit Windows) don't allow * you to retrieve it. Normally, we'd just say stop fetching it but * until DBI 1.616 DBI itself issues a FETCH if you mention * odbc_SQL_ROWSET_SIZE in the connect method.*/ SQLULEN rowset_size; /* * We need special workarounds for the following drivers. 
To avoid * strcmping their names every time we do it once and store the type here */ enum { DT_DONT_CARE, DT_SQL_SERVER, /* SQLSRV32.DLL */ DT_SQL_SERVER_NATIVE_CLIENT, /* sqlncli10.dll | SQLNCLI.DLL */ DT_MS_ACCESS_JET, /* odbcjt32.dll */ DT_MS_ACCESS_ACE, /* ACEODBC.DLL */ DT_ES_OOB, /* Easysoft OOB */ DT_FIREBIRD, /* Firebird OdbcFb */ DT_FREETDS /* freeTDS libtdsodbc.so */ } driver_type; char odbc_driver_name[80]; char odbc_driver_version[20]; char odbc_dbms_name[80]; char odbc_dbms_version[80]; int odbc_batch_size; /* rows in a batch operation */ int odbc_array_operations; /* enable/disable inbuilt execute_for_fetch etc */ /*int (*taf_callback_fn)(SQLHANDLE connection, int type, int event);*/ SV *odbc_taf_callback; /* If the driver does not support SQLDescribeParam or SQLDescribeParam fails we fall back on a default type. However, some databases need that type to be different depending on the length of the column. MS SQL Server needs to switch from VARCHAR to LONGVARCHAR at 4000 bytes whereas MS Access at 256. We set the switch point once we know the database. */ int switch_to_longvarchar; /* Initially -1 and if someone sets ReadOnly to true it becomes 1. Even if the ODBC driver cannot set SQL_ATTR_ACCESS_MODE but ReadOnly is set to 1, read_only is 1 and that is returned without asking the ODBC Driver what it is currently set to, Of course setting it to false works similarly. */ int read_only; int catalogs_supported; SQLUINTEGER schema_usage; }; /* Define sth implementor data structure */ struct imp_sth_st { dbih_stc_t com; /* MUST be first element in structure */ HENV henv; /* copy for speed */ HDBC hdbc; /* copy for speed */ SQLHSTMT hstmt; int moreResults; /* are there more results to fetch? */ int done_desc; /* have we described this sth yet? */ int done_bind; /* have we bound the columns yet? */ /* Input Details */ char *statement; /* sql (see sth_scan) */ HV *all_params_hv; /* all params, keyed by name */ AV *out_params_av; /* quick access to inout params */ int has_inout_params; UCHAR *ColNames; /* holds all column names; is referenced * by ptrs from within the fbh structures */ UCHAR *RowBuffer; /* holds row data; referenced from fbh */ SQLLEN RowBufferSizeReqd; imp_fbh_t *fbh; /* array of imp_fbh_t structs */ SQLLEN RowCount; /* Rows affected by insert, update, delete * (unreliable for SELECT) */ SV *param_sts; /* ref to param status array for array bound PHs */ int params_procd; /* to recv number of parms processed by an SQLExecute() */ SV *row_sts; /* ref to row status array for array bound columns */ UDWORD rows_fetched; /* actual number of rows fetched for array binding */ UDWORD max_rows; /* max number of rows per fetch for array binding */ UWORD *row_status; /* row indicators for array binding */ int odbc_ignore_named_placeholders; /* flag to ignore named parameters */ SQLSMALLINT odbc_default_bind_type; /* flag to set default binding type (experimental) */ SQLSMALLINT odbc_force_bind_type; /* force bound parameters to be this type */ int odbc_exec_direct; /* flag for executing SQLExecDirect instead of SQLPrepare and SQLExecute. 
Magic happens at SQLExecute() */ int odbc_force_rebind; /* force rebinding the output columns after each execute to */ /* resolve some issues where certain stored procs can return */ /* multiple result sets */ SQLINTEGER odbc_query_timeout; IV odbc_putdata_start; IV odbc_column_display_size; int odbc_utf8_on; int odbc_describe_parameters; SQLUSMALLINT *param_status_array; /* array for execute_for_fetch parameter status */ SQLULEN params_processed; /* for execute_for_fetch */ int odbc_batch_size; /* rows in a batch operation */ int odbc_array_operations; /* enable/disable inbuilt execute_for_fetch etc */ int allocated_batch_size; /* size used for last batch */ }; #define IMP_STH_EXECUTING 0x0001 struct imp_fbh_st { /* field buffer EXPERIMENTAL */ imp_sth_t *imp_sth; /* 'parent' statement */ /* field description - SQLDescribeCol() */ UCHAR *ColName; /* zero-terminated column name */ SQLSMALLINT ColNameLen; SQLULEN ColDef; /* precision */ SQLSMALLINT ColScale; SQLSMALLINT ColSqlType; SQLSMALLINT ColNullable; SQLLEN ColLength; /* SqlColAttributes(SQL_COLUMN_LENGTH) */ SQLLEN ColDisplaySize; /* SqlColAttributes(SQL_COLUMN_DISPLAY_SIZE) */ /* Our storage space for the field data as it's fetched */ SWORD ftype; /* external datatype we wish to get. * Used as parameter to SQLBindCol(). */ UCHAR *data; /* points into sth->RowBuffer */ SQLLEN datalen; /* length returned from fetch for single row. */ unsigned long bind_flags; /* flags passed to bind_col */ /* Be careful: bind_flags mix our flags like ODBC_TREAT_AS_LOB with DBI's DBIstcf_DISCARD_STRING and DBIstcf_STRICT. If you add a flag make sure it does not clash */ #define ODBC_TREAT_AS_LOB 0x100 IV req_type; /* type passed to bind_col */ /* have we already bound this column because if we have you cannot change the type afterwards as it is not rebound */ unsigned int bound; }; typedef struct phs_st phs_t; /* scalar placeholder */ struct phs_st { /* scalar placeholder */ SQLUSMALLINT idx; /* index number of this param 1, 2, ... */ SV *sv; /* the scalar holding the value */ int sv_type; /* original sv type at time of bind */ char *sv_buf; /* pointer to sv's data buffer */ int svok; /* result of SvOK on output param at last bind time */ SQLULEN param_size; /* value returned from SQLDescribeParam */ int describe_param_called; /* has SQLDescribeParam been called */ SQLRETURN describe_param_status; /* status return from last SQLDescribeParam */ int biggestparam; /* if sv_type is VARCHAR, size of biggest so far */ bool is_inout; /* is this an output parameter? 
*/ IV maxlen; /* max possible len (=allocated buffer) for */ /* out parameters */ SQLLEN strlen_or_ind; /* SQLBindParameter StrLen_or_IndPtr argument */ /* containg parameter length on input for input */ /* and returned parameter size for output params */ SQLLEN *strlen_or_ind_array; /* as above but an array for execute_for_fetch */ char *param_array_buf; /* allocated buffer for array of params */ SQLSMALLINT requested_type; /* type optionally passed in bind_param call */ SQLSMALLINT value_type; /* SQLBindParameter value_type - a SQL C type */ SQLSMALLINT described_sql_type; /* sql type as described by SQLDescribeParam */ SQLSMALLINT sql_type; /* the sql type of the placeholder */ /* Remaining values passed to SQLBindParameter that we record to detect if we need to rebind due to changed args */ UCHAR *bp_value_ptr; /* ptr to actual value */ SQLSMALLINT bp_d_digits; /* decimal digits */ SQLULEN bp_column_size; SQLLEN bp_buffer_length; char name[1]; /* struct is malloc'd bigger as needed */ }; /* These defines avoid name clashes for multiple statically linked DBD's */ #define dbd_init odbc_init #define dbd_db_login odbc_db_login #define dbd_db_login6 odbc_db_login6 #define dbd_data_sources dbd_data_sources /* * Not defined by DBI * #define dbd_db_do odbc_db_do */ #define dbd_db_login6_sv odbc_db_login6_sv #define dbd_db_commit odbc_db_commit #define dbd_db_rollback odbc_db_rollback #define dbd_db_disconnect odbc_db_disconnect #define dbd_db_destroy odbc_db_destroy #define dbd_db_STORE_attrib odbc_db_STORE_attrib #define dbd_db_FETCH_attrib odbc_db_FETCH_attrib #define dbd_st_prepare odbc_st_prepare #define dbd_st_prepare_sv odbc_st_prepare_sv /*#define dbd_st_rows odbc_st_rows*/ #define dbd_st_execute odbc_st_execute #define dbd_st_execute_iv odbc_st_execute_iv #define dbd_st_fetch odbc_st_fetch #define dbd_st_finish odbc_st_finish #define dbd_st_destroy odbc_st_destroy #define dbd_st_blob_read odbc_st_blob_read #define dbd_st_STORE_attrib odbc_st_STORE_attrib #define dbd_st_FETCH_attrib odbc_st_FETCH_attrib #define dbd_describe odbc_describe #define dbd_bind_ph odbc_bind_ph #define dbd_error odbc_error #define dbd_discon_all odbc_discon_all #define dbd_st_tables odbc_st_tables #define dbd_st_primary_keys odbc_st_primary_keys #define dbd_db_execdirect odbc_db_execdirect #define dbd_st_bind_col odbc_st_bind_col /* end */ DBD-ODBC-1.61/README.RH90000644000175000017500000000113212254016152013361 0ustar martinmartinThis contents of this file are very dated now and I suspect the issues mentioned are no longer relevant. 2/28/2004 There have been some reports that Red Hat 9 has build problems. This is manifested by build problems and the Makefile generated having lots of issues. From what I can tell, as I don't have this problem on my RH9 system, this is mostly due to people setting the LANG environment variable to something that is utf8 (again, as far as I can tell). One example is LANG=en_US.utf8. By simply changing that to LANG=en_US, I have reports that the Makefile generated works. 
Regards, Jeff DBD-ODBC-1.61/Changes0000644000175000017500000033475613614567555013443 0ustar martinmartin# satisfy kwalitee test use strict; =encoding utf8 DBD::ODBC::Changes - Log of significant changes to the DBD::ODBC 1.61 2020-01-30 [BUG FIXES] Fix 12blob.t test by pali Fix searching for ODBC libraries in system by pali (#15) [ENHANCEMENTS] use PERL_NO_GET_CONTEXT for more performance by markusbeth (#13) [MISCELLANEOUS] Fix travis builds for older Perls by pali 1.60 2018-10-31 [BUG FIXES] Merged pull request 11 from audun which fixes some issues with the AutoCommit flag on commit and rollback. [MISCELLANEOUS] Merged pull request 10 from vadz which fixed typo (affecting license) in README.md. 1.59 2018-08-10 [BUG FIXES] git issue 8. Setting odbc_utf8_on didn't work properly. Thanks to David Wheeler for reporting and helping to debug. 1.58 2018-03-01 [MISCELLANEOUS] Various changes to the test suite to get better results with Postgres 1.57 2018-03-01 [MISCELLANEOUS] Merged pull request 6 from genio which allows Makefile.PL argument -u to be set via the environment variable DBD_ODBC_UNICODE This version was removed from CPAN because it was uploaded with a nasty bug in the diagnostics code. 1.56 2016-10-06 Full release of the 1.53 development series One version skipped because of indexing problems. 1.53_2 2016-02-03 [MISCELLANEOUS] Add new FAQs 1.53_1 2015-10-16 [BUG FIXES] Strictly speaking this is a bug fix to DBI and not DBD::ODBC but DBI now supports 64 bit row counts where an IV in perl is 64 bits. However, it necessitated changes to DBD::ODBC to pick up the fix. odbc_rows (my workaround since 2012) is still supported but should no longer be required so long as you use this DBD::ODBC and DBI 1.633_92 or above. [INTERNALS] Removed dbd_st_rows and now setting DBIc_ROW_COUNT. [DOCUMENTATION] Add tables and table_info section to deviations from the DBI spec. [MISCELLANEOUS] Change column name in t/rt_101579.t as "method" is a reserved word in. Teradata Thanks to Zhenyi Zhou. Remove duplicate dynamic_config from META.yml. 1.52 2015-04-15 [MISCELLANEOUS] Changes to the test suite to make it run better with Postgres thanks to Greg Sabino Mullane. 1.51_4 2015-01-18 [BUG FIXES] Numerous errors in the test suite (with SQLite ODBC driver) mostly down to not creating the test table first. [MISCELLANEOUS] Try and make the test suite run ok for SQLite ODBC driver so I can use it in travis-ci. 1.51_3 2015-01-17 [BUG FIXES] RT101579 - using bound input parameters for numeric columns (e.g., SQL_NUMERIC) only works the first time and will quite likely fail with "string data, right truncation" on the second and subsequent calls to execute. Thanks to Laura Cox for finding. 1.51_2 2014-11-19 [BUG FIXES] The table_info method (ANSI version only) was incorrectly passing the table name for the type argument. I think this bug was introduced last year. Thanks to Nick Gorham for spotting and providing a fix. 1.51_1 2014-11-14 [BUG FIXES] RT100186 - handle VARBINARY(MAX) parameters with SQL Server native client. Identify "libmsodbcsql*" as the MS ODBC Driver for Linux as there are some specific workarounds for MS Native Client ODBC driver. 1.50 2014-07-25 [BUG FIXES] The 80_odbc_diags.t test could fail if a driver fails a table does not exist test in the prepare instead of the execute. 1.49_4 2014-07-08 [BUG FIXES] Fixed sql_type_cast.t test which assumed column aliases which stay lowercase. Fixed 87_odbc_lob_read.t test which did not bow out of the test properly if the database was not MS SQL Server. 
[DOCUMENTATION] Revised the query notification example and documentation. Added a link to a better Query Notification article. 1.49_3 2014-05-01 [CHANGE IN BEHAVIOUR] As warned years ago, this release removes the odbc_old_unicode attribute. If you have a good reason to use it speak up now before the next non-development release. [BUG FIXES] Fix rt89255: Fails to create test table for tests using PostgreSQL odbc driver. Change test suite to fallback on PRECISION if COLUMN_SIZE is not found. [ENHANCEMENTS] Added support for MS SQL Server Query Notification. See the new section in the pod. Added a currently undocumented (and experimental) odbc_describe_param method on a statement handle which takes a parameter number as the only argument and returns an array of the data type, parameter size, decimal digits and nullable (as per SQLDescribeParam). [DOCUMENTATION] Added FAQ on truncated column names with freeTDS. [MISCELLANEOUS] I have removed the "experimental" tag for odbc_getdiagfield and odbc_getdiagrec methods. 1.49_2 2014-04-26 [BUG FIXES] Change to data_sources in 1.49_1 could lead to a compile error since data_sources was not returning a value if an error occurred. 1.49_1 2014-04-25 [BUG FIXES] If you had a lot of DSNs on Windows (more than 280 but it depends on the length of their names) and called the data_sources method it could crash your script. Code internally changed to stop putting the DSNs returned on the stack. [CHANGE IN BEHAVIOUR] As warned years ago, the private data_sources method has been removed - use DBI one instead. [MISCELLANEOUS] Added FAQ entry of maximum number of allowed parameters. 1.48 2014-03-03 [MISCELLANEOUS] Manifest has wrong filename for 90_trace_flags.t Forgot to remove warning from ODBC.pm that this is a development release and unicode change when I released 1.47. 1.47 2014-02-19 Full release of the 1.46 development releases. [MISCELLANEOUS] Just some tidying up of dbdimp.c - shouldn't make a difference to anyone. Further changes to this change file to make it CPAN::Changes spec. NOTE the changes.cpanhq.com site does not yet support "unknown" for dates. 1.46_2 2013-12-17 [BUG FIXES] When built with unicode support and odbc_old_unicode is not enabled columns reported as SQL_LONGVARCHAR were not by default bound as SQL_WCHAR and hence were not returned correctly unless the bind was overridden. [MISCELLANEOUS] Added test 90_trace_flag.t 1.46_1 2013-11-16 [CHANGE IN BEHAVIOUR] As warned in release 1.45, the binding of unicode parameters to char/varchar columns has changed significantly. If you don't attempt to insert unicode into char/varchar columns or if you only inserted unicode into nchar/nvarchar columns you should see no difference. From this release, unicode data inserted into char/varchar/longvarchar columns is bound as SQL_WCHAR and not whatever the driver reports the parameter as (which is mostly SQL_CHAR). Previously if DBD::ODBC received an error or (SQL_SUCCESS_WITH_INFO) from an ODBC API call and then the driver refused to return the error state/text DBD::ODBC would issue its own error saying "Unable to fetch information about the error" and state IM008. That state was wrong and has been changed to HY000. [BUG FIXES] Some drivers cannot support catalogs and/or schema names in SQLTables. Recent changes set the schema/catalog name to the empty string (good reasons below) which causes "optional feature not implemented" from MS Access (which does not support schemas - even for a simply ping (which uses SQLTables)). 
Now we call SQLCATALOG_NAME and SQLSCHEMA_USAGE on connect to ascertain support which modifies SQLTables call. [MISCELLANEOUS] Added test 45_unicode_varchar.t for MS SQL Server only so far. 1.45 2013-10-28 [CHANGE IN BEHAVIOUR] There is no intentional change in behaviour in this release but I'm adding a warning that the next development release is highly liking to contain some significant unicode changes in behaviour to fix some bugs which have been around for quite a long time now. [BUG FIXES] If an SQLExecute ODBC API call returned SQL_NO_DATA DBD::ODBC was still calling SQLError (which was a waste of time). Since 1.44_1 odbc_out_connect_string stopped returning anything. [MISCELLANEOUS] Added another link to resources for supplementary characters. Added 1 more test to 20SqlServer.t for update statement. Small changes to 20SqlServer.t test to skip some tests and note the problem if SQLExecute returns SQL_NO_DATA on a non searched update. 1.44_4 2013-10-16 [BUG FIXES] Fix method redefinition warnings in threads on Perl >= 5.16 thanks Dagfinn Ilmari Mannsåker [MISCELLANEOUS] Changed this Changes file to be closer to the version 0.03 change file spec. Added t/version.t test. Added recommends Test::Version. Updates to the odbc_more_results pod to help clarify its use after some confusion was seen in a perlmonks thread. 1.44_3 2013-10-11 [CHANGE IN BEHAVIOUR] If you attempt to set the ReadOnly attribute and the underlying ODBC driver does not support this (SQL_SUCCESS_WITH_INFO and "option value changed" is returned) a warning is issued. However, until RT 89015 "You cannot issue a warning in the STORE method" in DBI is resolved you won't get this warning. As DBI 1.628 it is not resolved. I've only seen the SQLite ODBC driver do this. If you set ReadOnly and the underlying ODBC driver does not support this then any subsequent attempts to fetch the ReadOnly attribute will return the value you set. [BUG FIXES] The 82_table_info test assumed all database and ODBC Drivers supported catalogs and schemas (some don't). Use get_info to find out if catalogs and schemas are supported before running these tests. The rt_79190.t could incorrectly fail if your test DSN contained the DRIVER attribute. [MISCELLANEOUS] Added RedHat spec file to examples courtesy of Michiel Beijen. Added "use strict" to FAQ/Changes etc to quieten kwalitee test. Added a workaround in the test suite for a probable bug in the postgres ODBC driver which does not return COLUMN_SIZE from SQLGetTypeInfo. It also issues a warning. See http://www.postgresql.org/message-id/524EF455.6050409@ntlworld.com 1.44_3 2013-10-11 [MISCELLANEOUS] Skip 70execute_array_native.t test if MS Access - even if behind an ODBC Bridge. Fixed some compiler warnings when attempting to print/trace SvCUR. 1.44_2 2013-09-07 [BUG FIXES] When table_info was called with a '%' for any one of the catalog, schema or type arguments with the rest all '' (the empty string), only a list of catalogs, schemas or types should be returned. It was not doing that as it was changing empty strings to undef/NULL. pod for odbc_lob_read had an example only saying lob_read. TYPE attribute for odbc_lob_read was actually coded as Type. It is now as documented. The example lob_read.pl had the TYPE set to 999 from when I was testing it but it got checked in like this. MANIFEST contained column_info.pl but the file was coltest.pl [MISCELLANEOUS] Fixed RT 86379 - spelling mistakes in ODBC.pm and FAQ - thanks to David Steinbrunner. Added 82_table_info.t test. Added 87_odbc_log_read.t test. 
1.44_1 2013-06-06 Moved from subversion to github as svn.perl.org is closing down. Changed docs to show new repository. [BUG FIXES] Fixed RT 84450 - Database Handle Attribute Fetch broken. Thanks to Stephen Oberholtzer for finding and supplying patch. Fixed problem with attributes on bind_col not being sticky. You'll probably only see this if you are using fetchall_arrayref with a slice and setting TYPE or attributes in bind_col first. 1.43 2013-03-06 This is a full release of all the 1.42_* development releases. plus: [BUG FIXES] Minor fix to 10handler.t test suite which relied on a native error being true instead of defined. 1.42_5 2013-01-25 [BUG FIXES] Not all modules used in test code were specified in build_requires. 1.42_4 2013-01-21 [ENHANCEMENTS] odbc_trace and odbc_trace_file are now full connection attributes so you can set them any time you like, not just in connect. 1.42_3 2013-01-17 [ENHANCEMENTS] Added odbc_trace_file and odbc_trace attributes to the connect method so you can now enable ODBC API tracing from the connect method instead of having to use the ODBC Driver Manager. These also only enable ODBC API tracing in the application which made the call unlike the ODBC Driver Manager settings. 1.42_2 2012-12-17 [MISCELLANEOUS] Changed any use of if SvUPGRADE to remove the if test as per email from Dave Mitchell and posting at http://www.xray.mpe.mpg.de/mailing-lists/perl5-porters/2012-12/msg00424.html. 1.42_1 2012-12-12 [BUG FIXES] DBD::ODBC's ExecDirect method did not return an SQLLEN so if you managed to affect a massive number of rows it would be cast to an int and hence precision lost. [CHANGE IN BEHAVIOUR] When you called DBI's execute method and odbc_exec_direct was not set (the default) if you managed to affect more rows than would fit into an int you would get the incorrect count (NOTE on 32 bit platforms ODBC's SQLRowCount can only return a 32bit value anyway). You would get whatever casting an SQLLEN to an int would give you. The fix for this needs a change to DBI (see RT 81911) and the change would probably impact every DBD so until then DBD::ODBC will a) warn if an overflow occurs and Warn is set on the handle b) return INT_MAX and c) provide a new statement method odbc_rows which you can use to get the correct value. [ENHANCEMENTS] New odbc_rows statement method (see above). [MISCELLANEOUS] New rt_81911.t test case. 1.42_0 2012-11-28 [BUG FIXES] MS Access requires a longchar column to be bound using SQL_LONGVARCHAR. However, MS Access does not support SQLDescribeParam and we default to SQL_VARCHAR in this case. The point at which we switch to SQL_LONGVARCHAR was defaulted to 4000 (for MS SQL Server). We now default to SQL_LONGVARCHAR for MS Access when data is > 255. This means you can remove those {TYPE => SQL_LONGVARCHAR} from your bind_param calls for longchar columns in MS Access. I seem to have introduced a bug in the test suite for MS Access. The last test in the 09bind test binds dates as varchars (by default) and this cannot work in MS Access (it needs to be a timestamp). This test was skipped in the past and the skip got removed. [MISCELLANEOUS] Steffen Goeldner reported some issues with execute_array in DBD::Oracle where if ArrayTupleStatus was not specified and an error occurred DBD::Oracle did not do the right thing. As I used DBD::Oracle as a base when I wrote execute_for_fetch in DBD::ODBC I added tests to the test suite to ensure these issues did not exist in DBD::ODBC. 
Minor change to sql_type_cast.t test which attempts to insert an integer into a varchar. No databases so far have complained about this until we ran the test against Derby. Changed to use '100'. RT 80446 - fix spelling mistake - thanks to Xavier Guimar. 1.41 2012-10-23 A full release of the 1.40 development release series. 1.40_3 2012-10-08 [BUG FIXES] Oops, changes to some rt tests fail when not run to MS SQL Server and they should not be run for other drivers - there was a double done_testing call. [CHANGE IN BEHAVIOUR] As I warned literally years ago DBD::ODBC's private function DescribeCol has been removed. You can use DBI's statement attributes like NAME, PRECISION etc, instead. All test code has been changed to remove calls to DescribeCol and GetTypeInfo. [MISCELLANEOUS] New example sqlserver_supplementary_chrs.pl added which shows that in MS SQL Server 2012 you can now store unicode characters over 0xFFFF (ones which are surrogate pairs). More documentation for odbc_out_connect_string. 1.40_2 2012-09-06 [BUG FIXES] Fixed rt 78838 - bind_param does not correctly stringify blessed objects when connected to MS SQL Server Fix issue in dbd_bind_ph where if you passed a sql type and were also attempting to change from in to out or vice versa or increasing the size of an output bound param it would not spot this error. Allowed the test cases to spot DB2 driver as libXXXdb2. [MISCELLANEOUS] New test cases added for some rts. Added Test::NoWarnings to some tests where it was missing. 1.40_1 2012-09-04 [BUG FIXES] Debian/Ubuntu have moved unixODBC into /usr/lib/i386-linux-gnu so look in this dir for unixODBC as well - thanks to Meastro for finding. Fixed rt 78838 I had a sequence point error which is only seen with some compilers as it is sometimes optimized out. It could cause DBD::ODBC to omit adding the UID/PWD to the end of the connection string when using DSN=. Thanks to Zsolt Cserna for spotting it and to ilmari and Leon for explaining it to me. Fixed rt 79397 Output bound parameters may be incorrectly bound if changed after bind_param_inout is called. If you start with an undef bound param and change it to a defined string/number less than 28 characters before calling execute the original undef will probably be bound. Thanks to runrig on perl monks for providing an example. [CHANGE IN BEHAVIOUR] If you attempt to bind an rv without amagic DBD::ODBC will now croak - related to rt 78838. 1.39 2012-07-07 [BUG FIXES] Manifest mentioned 2 files in examples which do not exist - they should have been execute_for_fetch.pl. execute_for_fetch.pl example had not be changed since odbc_disable_array_operations became odbc_array_operations. 1.38_3 2012-06-25 [BUG FIXES] Added encoding line to this file to stop pod test from complaining. [DOCUMENTATION] Added link to 64 bit ODBC article. Fixed some typos in the pod. [MISCELLANEOUS] Made pod.t an author test. 1.38_2 2012-05-24 [ENHANCEMENTS] Added support for Oracle TAF (assuming your ODBC driver supports it) - see odbc_taf_callback. 1.38_1 2012-05-19 [BUG FIXES] Fixed rt 77283. If you overrode the bind type as SQL_INTEGER in a bind_col call AFTER previously binding as another type (or not specifying a type) you would not get the right value back. This also fixes the DiscardString bind_col attribute for SQL_INTEGER binds (however, see below as DiscardString is no longer required for SQL_INTEGER). Fixed some format specifiers in trace calls. 
[CHANGE IN BEHAVIOUR] DBD::ODBC allowed you to change the bound column type in bind_col after the column was already bound. It now does not allow this and issues a warning. You can nolonger override the bound column type (except with SQL_NUMERIC and SQL_DOUBLE). All columns are now bound as either SQL_C_LONG (integer columns) or SQL_C_[W]CHAR (all other columns). If you are calling bind_col with a TYPE => xxx it most likely did not do what you expected and you should examine it carefully with a view to removing it altogether. As a result you no longer have to override the bind type for MS SQL Server XML columns - these will be bound as SQL_C_CHAR or SQL_C_WCHAR depending on whether Unicode is enabled. Integer columns are now bound as SQL_C_LONGs and not as before, SQL_C_CHAR. This should not matter to you but if you were adding 0 to your integer columns retrieved to make them behave like integers you should nolonger need to do it. [OTHER] Added some missing SQL_C_xxx types to S_SqlCTypeToString internal function. This only affects tracing. Some tests in 08bind were skipped when they did not need to be. sql_type_cast tests rewritten due to fixes above. 1.37 2012-04-07 A full release of the 1.36 dev releases. Please note the changes in behaviour below. 1.36_2 2012-03-31 [BUG FIXES] * not strictly a bug fix, more a workaround. MS Access mdb driver reports 22 for the size of doubles but then returns an empty string for long doubles that still fit in 22 chrs. rt 69864. [CHANGE IN BEHAVIOUR] * The odbc_disable_array_operations has been replaced with odbc_array_operations and the default for array operations is off. Sorry, but I warned this was experimental. The ODBC_DISABLE_ARRAY_OPERATIONS env var remains. [DOCUMENTATION] * Rewrote parts of "Unicode implementation in DBD::ODBC" to describe UTF-16/UCS-2 internal implementation. * Changed ordering of some of the pod sections. 1.36_1 2012-03-21 [BUG FIXES] * Fixed 12blob.t skip count when driver does not have a big enough varchar column to run the test. * Work around problems hit during the test suite in DB2 driver which only allows rows of size up to the page size and varchars of 4K. * Fix bug in execute_for_fetch where it would ignore the parameters processed when creating the ArrayTupleStatus and hence could attempt to call SQLGetDiagRec on a parameter row which was not executed. See the logs in rt 75687 which show this although this is not a fix for this rt. * The code wasn't catching success with info from SQLExecute in execute_for_fetch. * Add support for drivers (DB2 in this case) which return SQL_PARAM_DIAG_UNAVAILABLE in bound parameter status array when running execute_for_fetch. Any driver returning doing SQL_PARC_NO_BATCH from SQLGetInfo(SQL_PARAM_ARRAY_ROW_COUNTS) might do this. * Fix test code for execute_for_fetch to a) handle drivers doing SQL_PARC_NO_BATCH and b) add "not null" to primary key fields for drivers like DB2 which require it. [CHANGE IN BEHAVIOUR] * In execute_for_fetch set the parameter status array to all 9999 (which is invalid) so we can see if the ODBC driver actually sets them and we can warn if they don't. * For freeTDS default odbc_disable_array_operations to 1 as no version of the freeTDS driver can be found that works. I was requested to do this by the dbix-class guys. I may revert this later if freeTDS is fixed. * as above for MS Access. It is a shame I cannot find any way of finding out if a driver is capable of array operations. 
[ENHANCEMENTS] * execute_for_fetch code now checks the ODBC_DISABLE_ARRAY_OPERATIONS environment variable which can be set to 1 or 0 to override the internal default. [DOCUMENTATION] * Fixed ColAttributes example in pod which used a $dbh instead of a $sth. * Fixed DescribeCol example in pod which used a $dbh instead of a $sth. * new FAQ on SQLRowCount, freeTDS and execute_for_fetch * Fix typo shown in rt 75860. [OTHER] * Reduced usage of D_imp_xxx to avoid calls to dbih_getcom2. See thread on dbi-dev at http://www.mail-archive.com/dbi-dev@perl.org/msg06675.html * Changed the 70execute_array.t test to run it twice, once using DBI's methods and once using the native one in DBD::ODBC. * Made the 2 unicode tests work with DB2 ODBC driver. 1.35 2012-03-06 Full release of the 1.34 development releases 1.34_7 2012-03-02 [BUG FIXES] * Fixed more compiler errors highlighed by a smoker using MS Visual C where some code come before a variable definition. 1.34_6 2012-02-27 [BUG FIXES] * Fixed some compiler warnings and a compile error highlighed by a smoker using MS Visual C where some code come before a variable definition. 1.34_5 2012-02-17 [BUG FIXES] * The 40UnicodeRoundTrip tests counts could be 1 off in some cases. * Fix for t/03batt.t which could fail a test if the data source had no table - Kenichi Ishigaki * If a driver misbehaves during global destruction e.g. SQLFreeStmt fails but no error is available DBD::ODBC issues an error saying an error occurred but no error diagnostics could be found. This is pointless and irritating during global destruction. This stems from a change in 1.28. Thanks to Peter Rabbitson for reporting and suggested fix. [CHANGE IN BEHAVIOUR] * Prior to this release if you called selectall_* methods with a non-select statement DBD::ODBC would raise an error saying "no select statement currently executing". See RT 68720. After discussions on dbi-dev the concensus seems to be that issuing a warning in this case is better so that is what I've done. As a result t/rt_68720.t has been removed and t/85_selectall_non_select.t has been added. [DOCUMENTATION] * odbc_getdiagfield was incorrectly named odbc_getdiagrec in the pod * add DBI version required for StrictlyTyped and DiscardString to pod * Added new FAQ on why a transaction may be committed when AutoCommit is turned off. [OTHER] * Make examples\odbc_diag.pl more tolerant of drivers which do not handle diagnostic calls properly. * Make t/40UnicodeRoundTrip.t work with SQLite - Kenichi Ishigaki * Make t/odbc_describe_parameter.t work with SQLite - Kenichi Ishigaki * Add 80_odbc_diags.t based on the same file in examples 1.34_4 2012-02-05 [BUG FIXES] * When odbc_getdiag* methods were added they installed themselves into DBI but did not set IMP_KEEP_ERR so calling them cleared DBI's errors. 1.34_3 2012-02-03 [BUG FIXES] * Linking against unixODBC was working by accident on most UNIX machines and depended on the order of the files in /usr/lib (or wherever) and what files there were (e.g. an archive or a shared object). Same applied to iODBC but it was more broken especially on machines where libiodbc.so.N.N existed but there was no libiodbc.so which could lead to no adding the shared object at all. I doubt anyone really noticed this but I did eventually on Ubuntu where libiodbc.so.N.N existed but libiodbc.so did not. [ENHANCEMENTS] * Added experimental odbc_getdiagrec and odbc_getdiagrec methods, examples/odbc_diag.pl and examples/params_in_error.pl. [DOCUMENTATION] * New FAQ entries. 
1.34_2 2012-01-25 [BUG FIXES] * Fixed rt73734 - debian moved where unixODBC libs are stored. * Fixed memory leak of the parameter status array introduced in previous release when execute_for_fetch used. When the statement handle is destroyed the parameter status array was not freed. [ENHANCEMENTS] * Added environment variable PERL_DBD_ODBC_PREFER_UNIXODBC as a synonym for -x from Rafael Kitover (Caelum). [DOCUMENTATION] * Add a deviation from DBI spec for type_info_all. [OTHER] * Added example execute_for_fetch.pl 1.34_1 2011-12-11 [ENHANCEMENTS] * Added experimental execute_for_fetch support and associated attributes odbc_batch_size and odbc_disable_array_operations. 1.33 2011-12-01 This is simply the official release of the 1.32 development releases. 1.32_5 2011-11-24 [ENHANCEMENTS] * Enable multiple active statement support in 70execute_array.t for drivers we recognise which support MAS. * Change column_info to support Unicode catalog/schema/table/column names. 1.32_4 2011-11-22 [BUG FIXES] * remove debugging printf which output "HERE" in some rare cases. rt 72534 - thanks John Deighan for spotting this. * The test 70execute_array.t could fail due to warning being output if the driver does not support Multiple Active Statements. [ENHANCEMENTS] * Use SQLGetTypeInfoW on unicode builds. 1.32_3 2011-11-15 [BUG FIXES] * Fix bug in utf16_copy which was not adding a trailing NUL but I'm not sure this affected anyone until I changed table_info this release. [ENHANCEMENTS] * DBD::ODBC now allows unicode catalog/schema/table parameters to be passed to table_info. Of course they will only reliably work with a supporting Unicode ODBC driver. 1.32_2 2011-10-22 [ENHANCEMENTS] * Added new odbc_driver_complete attribute allowing the ODBC Driver Manager and ODBC Driver to throw dialogues for incomplete connection strings or expired passwords etc. [OTHER] * added more examples [DOCUMENTATION] * new FAQ entries * added note saying you cannot pass unicode schema/table/column names to metadata calls like table_info/column_info currently. 1.32_1 2011-06-24 [BUG FIXES] * I omitted rt_68720.t from the 1.31 distribution which leads to a warning as it is mentioned in the MANIFEST. [OTHER] * Changed line endings in README.af and README.unicode to be unix line endings and native eol-style in subversion. * Minor changes to Makefile.PL to save the opensuse guys patching. * Added unicode_sql.pl and unicode_params.pl examples 1.31 2011-06-21 [BUG FIXES] Recently introduced test sql_type_cast.t cannot work with DBI less than 1.611. Minor change to Makefile.PL to avoid use of unitialised warning on $ENV{LD_LIBRARY_PATH} in warning when it is not set. 1.30_7 2011-06-15 [BUG FIXES] Some time ago (I don't know when) a few internal errors generated by DBD::ODBC got ignored. There are about 5 of them but I seriously doubt anyone would hit any other than the data truncated error (which is reported by the ODBC driver anyway) and "no select statement currently executing". You can see rt_68720.t in the t directory for an example of the latter. [ENHANCEMENTS] An extra argument has been added to the sub associated with odbc_err_handler. The arguments passed to the odbc_err_handler are now state (string), error (string), native error code (number) and the status returned from the last ODBC API. The status will be SQL_ERROR (-1) for errors or SQL_SUCCESS_WITH_INFO (1) for informational messages. 
1.30_6 2011-06-04 [BUG FIXES] * When DBD::ODBC calls SQLExecDirect (the do method) it was not reporting informational diagnostics (SQL_SUCCESS_WITH_INFO) and not calling the error handler. Arguably, this is a change in behaviour but one I've struggled to resolve since in all other cases of warnings DBD::ODBC's error handler is called. However, DBI does not call its error handler for warnings so was DBD::ODBC wrong to call it's error in the first place for warnings? I decided it was better to leave this as it is but add checking of SQLExecDirect/do. Apart from anything else if DBD::ODBC does not call its error handler for informational diagnostics there is no way to retrieve print statements etc from procedures. * The odbc_describe_parameter.t test could fail with some versions of MS SQL Server ODBC Driver. It was down to when SQLDescribeParameter is disabled, the column_size passed to SQLBindParameter is 0. * pod example of odbc_err_handler incorrectly used \$err_handler instead of \&err_handler. 1.30_5 2011-05-24 [BUG FIXES] * The change in behavior detailed in 1.30_1 for wide character binding was still not working properly (see http://rt.cpan.org/Ticket/Display.html?id=67994). It was working for SQL_CHAR described columns but not SQL_VARCHAR. 1.30_4 2011-05-18 [BUG FIXES] * Fix issue described in http://www.nntp.perl.org/group/perl.dbi.dev/2011/05/msg6567.html. If you prepare a statement, disconnect and then try and execute the statement you get an error but it does not tell what is wrong. [ENHANCEMENTS] * Added support for StrictlyTyped and DiscardString to the bind_col method. [OTHER] * Minor changes to META.yml for mailing list, dynamic_config, homepage and keywords. * The pod was missing = before the heads on a couple of sections in "Private DBD::ODBC Functions" * TreatAsLob was incorrectly documented as BindAsLob. 1.30_3 2011-05-17 [BUG FIXES] * Made the new odbc_describe_parameters work and added test case. 1.30_2 2011-05-16 [ENHANCEMENTS] * Added the new odbc_describe_parameters attribute. 1.30_1 2011-05-12 [BUG FIXES] * Fixed some compiler warnings shown with -Wall including some printf formats that had extra/missing arguments. * Fixed t/70execute_array.t which was missing an "order by" in the check_data sub which could cause failures for drivers not returning the rows in the order they were inserted. * Minor fix to Makefile.PL to avoid issuing variable used in void context. [CHANGE IN BEHAVIOUR] * DBD::ODBC used to quietly rollback any transactions when disconnect was called and AutoCommit was off. This can mask a problem and leads to different behaviour when disconnect is called vs not calling disconnect (where you get a warning). This release issues a warning if you call disconnect and a transaction is in progress then it is rolled back. * DBD::ODBC used to bind char/varchar/longvarchar columns as SQL_CHAR meaning that in the unicode build of DBD::ODBC the bound column data would be returned 8bit in whatever character-set (codepage) the data was in, in the database. This was inconvenient and arguably a mistake. Columns like nchar/nvarchar etc were bound as SQL_WCHAR and returned as Unicode. This release changes the behaviour in a unicode build of DBD::ODBC to bind all char columns as SQL_WCHAR. This may inconvenience a few people who expected 8bit chars back, knew the char set and decoded them (sorry). See odbc_old_unicode to return to old behaviour. [ENHANCEMENTS] * added -w option to Makefile.PL to add "-Wall" to CCFLAGS and -fno-strict-aliasing so I can find warnings. 
* Cope with broken ODBC drivers that describe a parameter as SQL type 0. [OTHER] * Add "disconnect and transactions" to pod describing what DBD::ODBC does if you call disconnect with an outstanding transaction. * Reorganised FAQ for bound parameter issues and added a lot on bound parameter problems. * Added new FAQ entry for Firebird * Removed some unused variables and added some missing function prototypes 1.29 2011-03-08 * An official release of the 1.28 development releases. [NOTE] * The 1.28 development releases made a change which causes a generic error to be reported when an ODBC call fails but an error message is not retrieved from the ODBC Driver. It appears this has caught out a few ODBC drivers - notably freeTDS and Firebird. You now may see errors that were missed before e.g., DBIx::Class's tests for Firebird now errors test 21 "outer txn rolled back" (and others) because SQLRowCount returns an error after "ROLLBACK TO SAVEPOINT savepoint_0"; before this error was missed. 1.28_5 2011-03-06 [BUG FIXES] * rt_59621.t had wrong skip count * Fixed missing SQL_MAX_TABLE_NAME_LEN definition from test. * Fixed problem with some drivers which batch "insert;select" where SQLMoreResults is not required and an extra describe is done. * Fixed "select 1" in 02simple.t for Firebird ODBC Driver. * disconnect call added to 70execute_array.t was in the wrong place. * In non-unicode mode we bind strings as SQL_CHAR but the driver may have described them as SQL_WCHAR and we were not doing ChopBlanks processing in that case. [REQUIREMENTS] * Now needs Test::Simple 0.90. [OTHER] * Added dml_counts.pl example * worked around a problem in freeTDS in the 20SqlServer.t test provided by Ralph Doncaster. * Changed test rt_62033.t to try and make it work with freeTDS - I failed. It now skips attempts to fetch when the insert fails. * Worked around problem in Firebird ODBC driver which reports timestamps have a display size of 24 characters but then can return 29 which could lead to data truncation errors. See http://tracker.firebirdsql.org/browse/ODBC-112 * Worked around problem in Firebird ODBC driver which reports VARCHARs have a maximum length of 32765 but in fact it is 4000. See http://tracker.firebirdsql.org/browse/ODBC-111 * Improvements in tracing to take account of DBI's neatsvpv lops 5 characters off maxsize on a string. 1.28_4 2011-02-24 [BUG FIXES] * Fixed compilation problems with DBIf_TRACE_TXN * Added missing disconnect call to 70execute_array.t 1.28_3 2011-02-22 [BUG FIXES] * Fixed MANIFEST in 1.28_2 which named 2 files incorrectly. * Fixed use of PREREQ_PRINT in versions of ExtUtils::MakeMaker which don't support it. * Fixed a check on LD_LIBRARY_PATH for unixODBC which could report you've not set LD_LIBRARY_PATH correctly. [ENHANCEMENTS] * Added Perl and ExtUtils::MakeMaker version output to build process. * Added support for DBI's new trace flags ENC, CON, TXN and DBD. From DBI 1.617 you should be able to use: DBI_TRACE=DBD to ONLY get DBD::ODBC tracing without DBI tracing ENC and CON DBI flags are synonymous with DBD::ODBC's odbcconnection and odbcunicode trace flags which you can still use for now. [OTHER] * From now on I'm changing the way the Changes file is written as per article at http://blog.urth.org/2011/01/changes-file-how-and-how-not-to.html * Some broken drivers (freeTDS in this case) can return SQL_ERROR from an ODBC API function and then SQLError does not return error details. In this case set a generic error saying an error occurred but we could not retrieve it. 
* Added FAQ entry on MS Access and text columns limited to 255 chrs. * Added 70execute_array.t test. 1.28_2 2011-01-24 Added -x argument to Makefile.PL saying you prefer unixODBC over iODBC driver managers as a) we need to look for iODBC first on some platforms to detect iODBC and b) some platforms (Debian/Ubuntu) people have both binary packages installed but only unixODBC dev package. Patch from Rafael Kitover (Caelum) for better Cygwin handling. Minor change to data sources test to cope with someone having no data sources and using a DSN-less connection for testing. Fixed MARS test when a DSN-less connection used for testing - thanks to Rafael Kitover (Caelum) for spotting this. pod patch for "CPAN Testers Reporting" to point at cpan testers wiki from Rafael Kitover (Caelum). Fixed some broken links in the FAQ. Add a multiple active statement document link to random links and the FAQ entry. A call to SQLNumResultCols was not checked to test it succeeded. Not seen anyone run into this as yet. 1.28_1 2010-12-29 Rewrote documentation on odbc_SQL_ROWSET_SIZE and added loads of notes so I don't have to go through a huge irc conversation with ribasushi again. Workaround bug in DBI (prior to 1.616) which mistakenly issues a FETCH on any attribute passed to the connect method and sometimes driver managers error on SQL_ROWSET_SIZE in SQLGetConnectAttr. ChopBlanks was not working on UCS-2 encoded data written into bound columns and was also sometimes reading off the end of the bound array. Minor FAQ changes: Added an additional way to read MS Access dbs from Unix Clarified versions for MARS_Connection updates to cancel_big_fetch.pl Updated TO_DO with more stuff to do Improved tracing output Tidied up some of the examples 1.27 2010-12-29 Official release of the 1.26 development releases. 1.26_4 2010-12-14 Fixed bug highlighted by investigation into rt 62033 where the active flag was sometimes not set on the statement after SQLMoreResults indicates a result-set exists. Fix rt 63550 reported by John Corcoran where if a do method call fails the SQL C is not available in an error handler as we never created a DBI statement in the first place. Added a note to "do" deviations pod. Minor fix to head at wrong level in the pod. Fix a possible snprintf buffer overflow in GetTypeInfo when the type is specified and it is negative. 1.26_3 2010-11-18 Fixed rt 63108. The change to column binding in 1.24_2 was not complete and causes bound columns to be rebound on each execute wasting time and leaking memory. Found, diagnosed and proposed fix by Steve Bentley. 1.26_2 2010-11-09 Fixed bug found by frew where an snprintf can overflow when binding a lot of parameters. 1.26_1 2010-10-24 There are over 200 block changes in the source code for this release from the previous one (see below). If you are using DBD::ODBC in production you should not upgrade without testing this release first as it introduces a lot of internal changes. DBD::ODBC has now gone entirely ODBC 3 and relies on an ODBC Driver Manager to map calls to ODBC 2.0 drivers (why are you still using ODBC 2.0 drivers?). From now on DBD::ODBC needs to be linked with an ODBC Driver Manager and I recommend unixODBC on UNIX and the MS ODBC Driver Manager on Windows. There are really good reasons for this but mainly it is because an ODBC Driver Manager will map ODBC 2.0 calls to an ODBC 3.0 driver and vice versa and handle UNICODE transparently. Bumped Test::Simple requirement up to 0.90 so we can use done_testing and note. 
Bump Perl requirement to 5.8 as per DBI.
Workaround a bug in mdbtools which fails to set the out connection string in a SQLDriverConnect call. This can lead to a "panic: sv_setpvn called with negative strlen at blib/lib/DBD/ODBC.pm line 107", now fixed.
Added rt_61370.t for rt 61370.
Removed last remaining sprintf calls and replaced with snprintf.
Changed the point at which DBD::ODBC switches from VARCHAR to LONGVARCHAR or WVARCHAR to WLONGVARCHAR when SQLDescribeParam fails. It was 4000 and is now 2000 for unicode builds. Works around a daft issue in the MS SQL Server driver which will not allow 'x' x 2001 converted to wide characters to be inserted into a varchar(8000).
Minor change to Makefile.PL to print out found libs for iODBC and unixODBC.
Added some FAQs for problems with iODBC and a recent bug in DBI. Added FAQ on my_snprintf problem. Fixed some pod errors in FAQ document.
Fixed warning on 64 bit Strawberry Perl when compiling dbdimp.c for cast from ptr to UDWORD.
Moved to_do items from Changes to TO_DO.
Reformatted this file to save Jens work.
Changed calls to SQLTransact (ODBC 2.0) to SQLEndTran (ODBC 3.0). There really shouldn't be any ODBC 2.0 drivers still around but even if there are, the ODBC driver manager should do the mapping for us.
Changed calls to SQLGetConnectOption/SQLSetConnectOption to SQLGetConnectAttr/SQLSetConnectAttr for ODBC 3.0.
Changed calls to SQLColAttributes (ODBC 2.0) to SQLColAttribute (ODBC 3.0).
Bumped requirement on DBI to 1.609 because that is the first version to contain a dbipport.h which defined my_snprintf - see https://rt.cpan.org/Public/Bug/Display.html?id=62346 and http://www.nntp.perl.org/group/perl.dbi.dev/2010/10/msg6335.html
Various small changes to dbdimp.c which should have no effect other than to make it leaner:
  Removed all dTHR occurrences from dbdimp.c as it is a NOOP since 5.8 and we need 5.8 at least.
  Removed dbd_param_err as it was not used.
  Removed odbc_get_query_timeout as it was never compiled.
  Removed eod field from statement as it was never used.
  Removed a load of commented out code.
  Replaced some SvPV calls with SvPV_nolen when we didn't use the length.
  Removed some silly code from dbd_db_execdirect which attempted to return -3 - don't think it could ever get run.
  Minor tracing output tidy up.
  Removed dbd_caution as it was not used.
  Localised more variable declarations.

1.25 2010-09-22
Official release of 1.25 combining all the changes in the 1.24_x development releases.

1.24_6 2010-09-16
rt 61370 - default XML type parameters in SQL Server to SQL_WCHAR so they accept unicode strings.

1.24_5 2010-09-15
Fixed missing SvSETMAGIC on a bound scalar which was causing length() to return the wrong result - see http://www.perlmonks.org/?node_id=860211 and a big thank you to Perl Monks and in particular ikegami.
Changed bind_col so it actually pays attention to the TYPE attribute as you could not override the bind type of a bound column before.

1.24_4 2010-09-08
Left an sv_undef in - thanks smoke testers for finding that.
Change sprintf to snprintf for safety.

1.24_3 2010-09-06
Added note from Robert Freimuth for obtaining the last insert ID in MS Access.
Changed all &sv_yes/&sv_no occurrences in XS to PL_sv_yes/PL_sv_no as the originals have now gone from blead Perl.
Minor change to fix missing newline in trace output.
Added a FAQ entry for how "use regional settings" in MS SQL Server breaks things.

1.24_2 2010-07-23
Fix rt57957 reported by Marc Prewitt. DBD::ODBC was not ignoring named placeholders and ? inside comments.
Comments are deemed as text between "/*" and "*/" (if not in a literal) and line comments begin with "--". Added a FAQ on procedures not completing in MS SQL Server. Thanks to Peter Rabbitson for hitting this problem and reminding me I'd seen it a couple of times before. Added a FAQ on equality comparisons with ntext columns. Added pod for last_insert_id which is not currently supported. Fix bug where if SQLMoreResults was called and failed it was not reported. Removed some unused fields from the fbh structure which should save a little memory for each column in a result-set. Started adding support for DBI's DiscardString and StrictlyTyped but not complete yet so don't use them yet. Added experimental odbc_lob_read method - see pod. Thanks to tye and ikegami on perlmonks for pointing out some problems with my initial implementation. Moved the binding of columns to the first call to fetch instead of after execute is called as it prevents bind_col overrriding the type used for binding and I needed it to support odbc_lob_read. This may have undesired affects so any testing of this release would be appreciated. Added bind_col method so DBD::ODBC can support attributes on a bind_col call. Removed support for DBI's blob_read - it was totally flawed and did not work at all. May replace in the future. Added support for MS SQL Server XML type (SQL type -152). See rt 59621. Added note on do method implementation in DBD::ODBC and how some may consider it to deviate from the DBI specification. 1.24_1 2010-05-27 Corrected pod and private attributes for the odbc_SQL_DRIVER_ODBC_VER attribute which was documented as SQL_DRIVER_ODBC_VER. Added FAQ on pauses on statement destruction when all the result-set has not been retrieved (mostly freeTDS and MS SQL Server ODBC Driver). Fixed bug introduced in 1.24 where if you are using MS SQL Server, and you are preparing, binding placeholders and re-executing multiple times you may get a "Invalid character value for cast specification" error. Thanks to anonymous for spotting this and producing a standalone example of the problem that made it so much easier to find. 1.24 2010-05-14 Minor change in Makefile.PL to only use NO_META if ExtUtils::MakeMaker is at least at version 6.10. Reported by Chunmei Wu. Minor change to test rt_50852 which had wrong skip count. 1.23_5 2010-05-06 Added advice from Jan Dubois (ActiveState) on building DBD::ODBC for ActivePerl (see README.windows). rt56692. Fix spelling mistake in DBD::ODBC pod - thanks to Ansgar Burchardt. Added a 7th way to help documentation - become a tester. Hopefully fixed problems building on windows 32 bit platforms that have old sql header files not mentioning SQLLEN/SQLULEN. 1.23_4 2010-04-13 Added more FAQs. Small optimization to remove calls to SQLError when tracing is not turned on. This was a bug. We only need to call SQLError when SQLExecute succeeds if there is an error handler or if tracing is enabled. The test was for tracing disabled! Large experimental change primarily affecting MS SQL Server users but it does impact on other drivers too. Firstly, for MS SQL Server users we no longer SQLFreeStmt(SQL_RESET_PARAMS) and rebind bound parameters as it is causing the MS SQL Server ODBC driver to re-prepare the SQL. Secondly (for all drivers) we no longer call SQLBindParameter again IF all the arguments to it are the same as the previous call. If you find something not working you better let me know as this is such a speed up I'm going to go with this unless anyone complains. 
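A minimal sketch of the bind_col attribute support added above (1.24_2), together with the TYPE attribute it honours; the table, column and handle names are invented for illustration:

  use DBI qw(:sql_types);
  # assumes an existing DBD::ODBC database handle in $dbh
  my $sth = $dbh->prepare('select mycol from mytable');
  $sth->execute;
  my $val;
  $sth->bind_col(1, \$val, { TYPE => SQL_VARCHAR });  # override the default column bind type
  print "$val\n" while $sth->fetch;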
Minor change to avoid a double call to SQLGetInfo for SQL_DBMS_NAME immediately after connection. Small change for rt 55736 (reported by Matthew Kidd) to not assume a parameter is varXXX(max) if SQLDescribeParam failed in the Microsoft Native Client driver. 1.23_3 2010-03-24 Minor changes to Makefile.PL and dbdimp.c to remove some compiler warnings. Fix some calls to SQLMoreResults which were not passing informational messages on to DBI's set_err. As you could not see all the informational messages from procedures, only the first. Fix minor issue in 02simple test which printed the Perl subversion before the version. Changes to 20SqlServer.t to fix a few typos and make table names consistent wrt to case - (as someone had turned on case-sensitivity in SQL Server) Similar changes in rt_38977.t and rt_50852.t 1.23_2 2010-01-26 Fixed bug in Makefile.PL which could fail to find unixODBC/iODBC header files but not report it as a problem. Thanks to Thomas J. Dillman and his smoker for finding this. Fixed some compiler warnings in dbdimp.c output by latest gcc wrt to format specifiers in calls to PerlIO_printf. Added the odbc_force_bind_type attribute to help sort out problems with ODBC Drivers which support SQLDescribeParam but describe the parameters incorrectly (see rt 50852). Test case also added as rt_50852.t. 1.23_1 2009-10-21 makefile.PL changes: some formatting changes to output warn if unixodbc headers are not found that the unixodbc-dev package is not installed use $arext instead of "a" pattern match for pulling libodbc.* changed warn if DBI_DSN etc not defined change odbc_config output for stderr to /dev/null missing / on /usr/local wheb finding find_dm_hdr_files() New FAQ entries from Oystein Torget for bind parameter bugs in SQL Server. rt_46597.rt - update on wrong table Copied dbivport.h from the latest DBI distribution into DBD::ODBC. Added if_you_are_taking_over_this_code.txt. Add latest Devel::PPPort ppport.h to DBD::ODBC and followed all recommendations for changes to dbdimp.c. Added change to Makefile.PL provided by Shawn Zong to make Windows/Cygwin work again. Minor change to Makefile.PL to output env vars to help in debugging peoples build failures. Added odbc_utf8_on attribute to dbh and sth handles to mark all strings coming from the database as utf8. This is for Aster (based on PostgreSQL) which returns all strings as UTF-8 encoded unicode. Thanks to Noel Burton-Krahn. 1.23 2009-09-11 Only a readme change and version bumped to 1.23. This is a full release of all the 1.22_x development releases. 1.22_3 2009-08-19 Fix skip count in rt_38977.t and typo in ok call. Workaround a bug in unixODBC 2.2.11 which can write off the end of the string buffer passed to SQLColAttributes. Fix skip count in rt_null_nvarchar.t test for non SQL Server drivers. Fix test in 02simple.t which reported a fail if you have no ODBC datasources. In 99_yaml.t pick up the yaml spec version from the meta file instead of specifying it. Change calls to SQLPrepare which passed in the string length of the SQL to use SQL_NTS because a) they are null terminated and more importantly b) unixODBC contains a bug in versions up to 2.2.16 which can overwrite the stack by 1 byte if the string length is specified and not built with iconv support and converting the SQL from ASCII to Unicode. Fixed bug in ping method reported by Lee Anne Lester where it dies if used after the connection is closed. 
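A minimal sketch of the odbc_utf8_on attribute added in 1.23_1 above; the DSN and credentials are invented, and this is only appropriate for drivers (such as the Aster one mentioned) that really do return UTF-8 encoded strings:

  use DBI;
  # odbc_utf8_on marks all strings coming from the database as utf8
  my $dbh = DBI->connect('dbi:ODBC:DSN=mydsn', 'user', 'password',
                         { odbc_utf8_on => 1 });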
A great deal of changes to Makefile.PL to improve the automatic detection and configuration for ODBC driver managers - especially on 64bit platforms. See rt47650 from Marten Lehmann which started it all off. Add changes from Chris Clark for detecting IngresCLI. Fix for rt 48304. If you are using a Microsoft SQL Server database and nvarchar(max) you could not insert values between 4001 and 8000 (inclusive) in size. A test was added to the existing rt_38977.t test. Thanks to Michael Thomas for spotting this. Added FAQ on UTF-8 encoding and IBM iSeries ODBC driver. Add support for not passing usernames and passwords in call to connect. Previously DBD::ODBC would set an unspecified username/password to '' in ODBC.pm before calling one of the login_xxx functions. This allows the driver to pull the username/password from elsewhere e.g., like the odbc.ini file. 1.22_1 2009-06-16 Applied a slightly modified version of patch from Jens Rehsack to improve support for finding the iODBC driver manager. A UNICODE enabled DBD::ODBC (the default on Windows) did not handle UNICODE usernames and passwords in the connect call properly. Updated "Attribution" in ODBC.pm. Unicode support is no longer experimental hence warning and prompt removed from the Makefile.PL. old_ping method removed. Fixed bug in 02simple.t test which is supposed to check you have at least one data source defined. Unfortunately, it was checking you had more than 1 data source defined. rt_null_varchar had wrong skip count meaning non-sql-server drivers or sql server drivers too old skipped 2 tests more than were planned. 1.22 2009-06-10 Fixed bug which led to "Use of uninitialized value in subroutine entry" warnings when writing a NULL into a NVARCHAR with a unicode-enabled DBD::ODBC. Thanks to Jirka Novak and Pavel Richter who found, reported and patched a fix. Fixed serious bug in unicode_helper.c for utf16_len which I'm ashamed to say was using an unsigned short to return the length. This meant you could never have UTF16 strings of more than ~64K without risking serious problems. The DBD::ODBC test code actually got a *** glibc detected *** /usr/bin/perl: double free or corruption (out): 0x406dd008 *** If you use a UNICODE enabled DBD::ODBC (the default on Windows) and unicode strings larger than 64K you should definitely upgrade now. 1.21_1 2009-06-02 Fixed bug referred to in rt 46597 reported by taioba and identified by Tim Bunce. In Calls to bind_param for a given statement handle if you specify a SQL type to bind as, this should be "sticky" for that parameter. That means if you do: $sth->bind_param(1, $param, DBI::SQL_LONGVARCHAR) and follow it up with execute calls that also specify the parameter: $sth->execute("a param"); then the parameter should stick with the SQL_LONGVARCHAR type and not revert to the default parameter type. The DBI docs (from 1.609) make it clear the parameter type is sticky for the duration of the statement but some DBDs allow the parameter to be rebound with a different type - DBD::ODBC is one of those drivers. 1.21 2009-04-27 Change 02simple test to output Perl, DBI and DBD::ODBC versions. Fixed bug where if ODBC driver supports SQLDescribeParam and it succeeds for a parameterised query but you override the parameter type, DBD::ODBC was still using the size returned by SQLDescribeParam. Thanks to Brian Becker for finding, diagnosing and fixing this issue. Added FAQ entry about SQL Server and calling procedures with named parameters out of order. 
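A minimal sketch of connecting without a username and password as described in the entry above, letting the driver pick the credentials up from elsewhere (e.g. odbc.ini); the DSN is invented:

  use DBI;
  my $dbh = DBI->connect('dbi:ODBC:DSN=mydsn');  # no username or password passed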
Added test_results.txt containing some supplied make test results.

1.20 2009-04-20
Fix bug in handling of SQL_WLONGVARCHAR when not built with unicode support. The column was not identified as a long column and hence the size of the column was not restricted to LongReadLen. Can cause DBD::ODBC to attempt to allocate a huge amount of memory.
Minor changes to Makefile.PL to help diagnose how it decided which driver manager to use and where it was found.
Offer suggestion to debian-based systems when some of unixODBC is found (the bin part) but the development part is missing.
In 20SqlServer.t attempt to drop any procedures we created if they still exist at the end of the test. Reported by Michael Higgins.
In 12blob.t, separate the code which deletes the test table into a sub and call it at the beginning and end, and handle failures from prepare (there were two ENDs).
In ODBCTEST.pm when no acceptable test column type is found output all the found types and BAIL_OUT the entire test.
Skip rt_39841.t unless actually using the SQL Server ODBC driver or native client.
Handle drivers which return 0 for SQL_MAX_COLUMN_NAME_LEN.
Double the buffer size used for column names if built with unicode.

1.19 2009-04-02
Some minor diagnostic output during tests when running against freeTDS to show we know of issues in freeTDS.
Fixed issue in 20SqlServer.t where the connection string got set with two consecutive semi-colons. Most drivers don't mind this but freeTDS ignores everything after that point in the connection string.
Quieten some delete table output during tests.
Handle connect failures in 20SqlServer.t in the multiple active statement tests.
In 02simple.t cope with ODBC drivers or databases that do not need a username or password (MS Access).
In 20SqlServer.t fix skip count and an erroneous assignment for driver_name.
Change some if tests to Test::More->is tests in 02simple.t.
Fix "invalid precision" error during tests with the new ACEODBC.DLL MS Access driver. Same workaround applied for the old MS Access driver (ODBCJT32.DLL) some time ago.
Fix out of memory error during tests against the new MS Access driver (ACEODBC.DLL). The problem appears to be that the new Access driver reports ridiculously large parameter sizes for "select ?" queries and there are some of these in the unicode round trip test.
Fixed minor typo in Makefile.PL - diagnostic message mentioned "ODBC HOME" instead of ODBCHOME.
12blob.t test somehow got lost from MANIFEST - replaced. Also changed algorithm to get a long char type column as some MS Access drivers only show SQL_WLONGVARCHAR type in unicode.

1.18_4 2009-03-13
A mistake in the MANIFEST led to the rt_43384.t test being omitted.
Brian Becker reported the tables PERL_DBD_39897 and PERL_DBD_TEST are left behind after testing. I've fixed the former but not the latter yet.
Yet another variation on the changes for rt 43384. If the parameter is bound specifically as SQL_VARCHAR, you got an invalid precision error. Thanks to Øystein Torget for finding this and helping me verify the fix.
If you attempt to insert large amounts of data into MS Access (which does not have SQLDescribeParam) you can get an invalid precision error which can be worked around by setting the bind type to SQL_LONGVARCHAR. This version does that for you.
08bind2.t had a wrong skip count.
12blob.t had strict commented out and GetTypeInfo was not quoted. Also introduced a skip if the execute fails as it just leads to more obvious failures.
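A minimal sketch of the standard DBI long-column controls relevant to the 1.20 fix above (long SQL_WLONGVARCHAR columns are limited by LongReadLen); the handle is assumed to exist and the sizes are arbitrary:

  # assumes an existing DBD::ODBC database handle in $dbh
  $dbh->{LongReadLen} = 512 * 1024;  # fetch at most 512KB of a long column
  $dbh->{LongTruncOk} = 1;           # truncate silently rather than raising an error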
In dbdimp.c/rebind_ph there was a specific workaround for SQL Server which was not done after testing if we are using SQL Server - this was breaking tests to MS Access. 1.18_2 2009-03-09 Added yet another workaround for the SQL Server Native Client driver version 2007.100.1600.22 and 2005.90.1399.00 (driver version 09.00.1399) which leads to HY104, "Invalid precision value" in the rt_39841.t test. 1.18_1 2009-03-06 Fixed bug reported by Toni Salomäki leading to a describe failed error when calling procedures with no results. Test cases added to 20SqlServer.t. Fixed bug rt 43384 reported by Øystein Torget where you cannot insert more than 127 characters into a Microsoft Access text(255) column when DBD::ODBC is built in unicode mode. 1.18 2009-01-16 Major release of all the 1.17 development releases below. 1.17_3 2008-12-19 Reinstated the answer in the FAQ for "Why do I get invalid value for cast specification" which had got lost - thanks to EvanCarroll in rt41663. rt 41713. Applied patch from JHF Remmelzwaal to handle ODBC drivers which do not support SQL_COLUMN_DISPLAY_SIZE and SQL_COLUMN_LENGTH attributes in the SQLColAttributes calls after SQLTables and SQLColumns. Specifically, the driver he was using was the "Infor Integration ODBC driver". Added notes from JHF Remmelzwaal on resolving some problems he came across building DBD::ODBC on Windows with Visual Studio 6.0 and SDK Feb 2003. New odbc_column_display_size attribute for when drivers does not return a display size. Loads of tracing changes to make it easier for me to debug problems. Fixed bug in tracing of dbd_execute when parameter is char but undef which was leading to an access violation on Windows when tracing enabled. Minor changes to diagnostic output in some rt tests. One of the rt tests was not skipping the correct number of tests if the driver was not SQL Server. 1.17_2 2008-11-17 Changed ParamTypes attribute to be as specification i.e., { parameter_1 => {TYPE => sql_type} parameter_2 => {TYPE => sql_type} ... } and changed the tests in 07bind.t to reflect this. A few minor perlcritic changes to ODBC.pm. Added 99_yaml.t test to check META.yml. Added patch from Spicy Jack to workaround problems with Strawberry Perl setting INC on the command line when running Makefile.PL. 1.17_1 2008-10-10 Missing newline from end of META.yml upsets cpan Add code to Makefile.PL to spot command line containing INC, outline problem and resolution and not generate Makefile to avoid cpan-testers failures. Loads of pod formatting changes including a section in the wrong place New kwalitee test Fix rt 39841. Bug in SQL Server ODBC driver which describes parameters by rearranging your SQL to do a select on the columns matching the parameters. Sometimes it gets this wrong and ends up describing the wrong column. This can lead to a varchar(10) being described with a column-size less than 10 and hence you get data truncation on execute. Added a test case for rt 39841. Fix rt 39897. 1.17 added support for varchar(max) in SQL Server but it broke support for SQL_VARCHAR columns in that they had LongReadLen and LongTruncOk applied to them. This means that in 1.16 you could retrieve a SQL_VARCHAR column without worrying about how long it was but in 1.17 if the same column was greater than 80 characters then you would get a truncated error. The only way the around this was to set LongTruncOk or LongReadLen. Added a test case for rt 39897. 1.17 2008-09-22 In the absence of any bug reports since 1.16_4 this is the official 1.17 release. 
See below for changes since 1.16. Minor pod changes. Added support for ParamTypes (see DBI spec) and notes in DBD::ODBC pod. 1.16_4 2008-09-12 Small change to Makefile.PL to work around problem in darwin 8 with iODBC which leads to "Symbol not found: _SQLGetPrivateProfileString" errors. Added new [n]varXXX(max) column type tests to 20SqlServer.t. Fixed support for SQL_WCHAR and SQL_WVARCHAR support in non-unicode build. These types had ended up only being included for unicode builds. More changes to ODBC pod to 1) encourage people to use CPAN::Reporter, 2) rework contributing section, 3) mention DBIx::Log4perl 4) add a BUGS section 5) add a "ODBC Support in ODBC Drivers" section etc. Changed default fallback parameter bind type to SQL_WVARCHAR for unicode builds. This affects ODBC drivers which don't have SQLDescribeParam. Problem reported by Vasili Galka with MS Access when reading unicode data from one table and inserting it into another table. The read data was unicode but it was inserted as SQL_CHARs because SQLDescribeParam does not exist in MS Access so we fallback to either a default bind type (which was SQL_VARCHAR) or whatever was specified in the bind_param call. Fixed bug in 20SqlServer.t when DBI_DSN is defined including "DSN=". 1.16_3 2008-09-03 Changed Makefile.PL to add "-framework CoreFoundation" to linker line on OSX/darwin. Disallow building with iODBC if it is a unicode build. More tracing for odbcconnect flag. Fix bug in out connection string handling that attempted to use an out connection string when SQLDriverConnect[W] fails. Fixed yet more test count problems due to Test::NoWarnings not being installed. Skip private_attribute_info tests if DBI < 1.54 About a 30% rewrite of bound parameter code which started with an attempt to support the new VARBINARY(MAX) and VARCHAR(MAX) columns in SQL Server when the parameter length is > 400K in size (see elsewhere in this Changelog). This is a seriously big change to DBD::ODBC to attempt to be a bit more clever in its handling of drivers which either do not support SQLDescribeParam or do support SQLDescribeParam but cannot describe all parameters e.g., MS SQL Server ODBC driver cannot describe "select ?, LEN(?)". If you specify the bound parameter type in your calls to bind_param and run them to an ODBC driver which supports SQLDescribeParam you may want to check carefully and probably remove the parameter type from the bind_param method call. Added rt_38977.t test to test suite to test varchar(max) and varbinary(max) columns in SQL Server. Moved most of README.unicode to ODBC.pm pod. Added workaround for problem with the Microsoft SQL Server driver when attempting to insert more than 400K into a varbinary(max) or varchar(max) column. Thanks to Julian Lishev for finding the problem and identifying 2 possible solutions. 1.16_2 2008-09-02 Removed szDummyBuffer field from imp_fbh_st and code in dbd_describe which clears it. It was never used so this was a waste of time. Changed the remaining calls to SQLAllocEnv, SQLAllocConnect and SQLAllocStmt and their respective free calls to the ODBC 3.0 SQLAllocHandle and SQLFreeHandle equivalents. Rewrote ColAttributes code to understand string and numeric attributes rather than trying to guess by what the driver returns. If you see any change in behaviour in ColAttributes calls you'll have to let me know as there were a number of undocumented workarounds for drivers. 
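A minimal sketch of specifying (or omitting) a parameter bind type, as discussed in the 1.16_3/1.16_4 entries above; the SQL and values are invented, and SQL_WVARCHAR is assumed to be available from DBI's :sql_types:

  use DBI qw(:sql_types);
  # assumes an existing DBD::ODBC database handle in $dbh
  my $sth = $dbh->prepare('insert into mytable (a, b) values (?, ?)');
  $sth->bind_param(1, 'plain value');                        # let SQLDescribeParam choose, where supported
  $sth->bind_param(2, "\x{263A} wide value", SQL_WVARCHAR);  # force a wide type explicitly
  $sth->execute;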
Unicode build of DBD::ODBC now supports: column names The retrieval of unicode column names SQL strings Unicode in prepare strings (but not unicode parameter names) e.g., select unicode_column from unicode_table is fine but select * from table where column = :unicode_param_name is not so stick to ascii parameter names if you use named parameters. Unicode SQL strings passed to the do method are supported. SQL strings passed to DBD::ODBC when the odbc_exec_direct attribute is set will not be passed as unicode strings - this is a limitation of the odbc_exec_direct attribute. connection strings True unicode connection string support will require a new version of DBI (post 1.607). Note that even though unicode connection strings are not supported currently DBD::ODBC has had to be changed to call SQLDriverConnectW/SQLConnectW to indicate to the driver manager it's intention to use some of the ODBC wide APIs. This only affects DBD::ODBC when built for unicode. odbcunicode trace flag There is a new odbcunicode trace flag to enable unicode-specific tracing. Skipped 40Unicode.t test if the ODBC driver is Oracle's ODBC as I cannot make it work. Changes internally to use sv_utf8_decode (where defined) instead of setting utf8 flag. Fix problems in the test when Test::NoWarnings is not installed. Removed some unused variables that were leading to compiler warnings. Changed a lot of tracing to use new odbcconnection flag Changed to use dbd_db_login6_sv if DBI supports it. Commented out a diag in 20SqlServer.t that was leading to confusion. Added diag to 20SqlServer.t in mars test to explain why it may fail. Various pod changes for clarification and to note odbc_err_handler is deprecated. Removed odbcdev trace flag - it was not really used. New odbc_out_connect_string attribute for connections which returns the ODBC driver out connection string. 1.16_1 2008-08-28 Fixed bug in odbc_cancel which was checking the ACTIVE flag was on before calling SQLCancel. Non-select statements can also be cancelled so this was wrong. Thanks to Dean Arnold for spotting. Minor changes to test 01base to attempt to catch install_driver failing, report it as a fail and skip other tests. Fixed bug reported by James K. Lowden with asynchronous mode and SQLParamData where the code was not expecting SQL_STILL_EXECUTING and did not handle it Added odbc_putdata_start attribute to determine when to start using SQLPutData on lobs. Fixed bug in lob inserts where decimal_digits was being set to the size of the bound lob unnecessarily. Minor change to connect/login code to delete driver-specific attributes passed to connect so they do not have to be processed again when DBI calls STORE with them. New 12blob.t test. A lot of code tidy up but not expecting any real benefit or detriment when using DBD::ODBC. Fixed retrieving [n]varchar(max) columns which were only returning 1 byte - thanks to Fumiaki Yoshimatsu and perl monks for finding it. See http://markmail.org/message/fiym5r7q22oqlzsf#query:Fumiaki Yoshimatsu odbc+page:1+mid:fiym5r7q22oqlzsf+state:results Various minor changes to get the CPANTS kwalitee score up. fixed pod issues in FAQ.pm moved mytest dir to examples added generated_by and requires perl version to META.yml added pod and pod-coverage tests removed executable flag from Makefile.PL added use warnings to some modules and tests fixed pod errors in ODBC.pm added AUTHOR and LICENSE section to ODBC.pm added Test::NoWarnings to tests Added support for setting the new(ish) DBI ReadOnly attribute on a connection. 
See notes in pod. Changes to test suite to work around problems in Oracle's instant client 11r1 ODBC driver for Linux (SQLColAttributes problems - see 02simple.t). New tests in 30Oracle.t for oracle procedures. 1.16 2008-05-13 [TESTS] Small change to the last test in 10handler.t to cope with the prepare failing instead of the execute failing - spotted by Andrei Kovalevski with the ODBCng Postgres driver. Changed the 20SqlServer.t test to specifically disable MARS for the test to check multiple active statements and added a new test to check that when MARS_Connection is enabled multiple active statements are allowed. Changed the 09multi.t test to use ; as a SQL statement seperator instead of a newline. A few minor "use of unitialised" fixes in tests when a test fails. In 02simple.t Output DBMS_NAME/VER, DRIVER_NAME/VER as useful debugging aid when cpan testers report a fail. 2 new tests for odbc_query_timeout added to 03dbatt.t. Changed 02simple.t test which did not work for Oracle due to a "select 1" in the test. Test changed to do "select 1 from dual" for Oracle. New tests for numbered and named placeholders. [DOCUMENTATION] Added references to DBD::ODBC ohloh listing and markmail archives. Added Tracing sections. Added "Deviations from the DBI specification" section. Moved the FAQ entries from ODBC.pm to new FAQ document. You can view the FAQ with perldoc DBD::ODBC::FAQ. Added provisional README.windows document. Rewrote pod for odbc_query_timeout. Added a README.osx. [INTERNAL CHANGES] More tracing in dbdimp.c for named parameters. #ifdeffed out odbc_get_primary_keys in dbdimp.c as it is no longer used. $h->func($catalog, $schema, $table, 'GetPrimaryKeys') ends up in dbdimp.c/dbd_st_primary_keys now. Reformatted dbdimp.c to avoid going over 80 columns. Tracing changed. Levels reviewed and changed in many cases avoiding levels 1 and 2 which are reserved for DBI. Now using DBIc_TRACE macro internally. Also tracing SQL when 'SQL' flag set. [BUILD CHANGES] Changes to Makefile.PL to fix a newly introduced bug with 'tr', remove easysoft OOB detection and to try and use odbc_config and odbcinst if we find them to aid automatic configuration. This latter change also adds "odbc_config --cflags" to the CC line when building DBD::ODBC. Avoid warning when testing ExtUtils::MakeMaker version and it is a test release with an underscore in the version. [ENHANCEMENTS] Added support for parse_trace_flag and parse_trace_flags methods and defined a DBD::ODBC private flag 'odbcdev' as a test case. Add support for the 'SQL' trace type. Added private trace type odbcdev as an experimental start. Change odbc_query_timeout attribute handling so if it is set to 0 after having set it to a non-zero value the default of no time out is restored. Added support for DBI's statistics_info method. [BUG FIXES] Fix bug in support for named placeholders leading to error "Can't rebind placeholder" when there is more than one named placeholder. Guard against scripts attempting to use a named placeholder more than once in a single SQL statement. If you called some methods after disconnecting (e.g., prepare/do and any of the DBD::ODBC specific methods via "func") then no error was generated. Fixed issue with use of true/false as fields names in structure on MAC OS X 10.5 (Leopard) thanks to Hayden Stainsby. Remove tracing of bound wide characters as it relies on null-terminated strings that don't exist. Fix issue causing a problem with repeatedly executing a stored procedure which returns no result-set. 
SQLMoreResults was only called on the first execute and some drivers (SQL Server) insist a procedure is not finished until SQLMoreResults returns SQL_NO_DATA. 1.15 2008-01-29 1.15 final release. Fixed bug reported by Toni Salomaki where DBD::ODBC may call SQLNumResultCols after SQLMoreResults returns SQL_NO_DATA. It led to the error: Describe failed during DBI::st=HASH(0x19c2048)->FETCH(NUM_OF_FIELDS,0) when NUM_OF_FIELDS was referenced in the Perl script. Updated odbc_exec_direct documentation to describe its requirement when creating temporary objects in SQL Server. Added FAQ on SQL Server temporary tables. Fixed bug in dbdimp.c which was using SQL_WCHAR without testing it was defined - thanks Jergen Dutch. Fixed use of "our" in UCHelp.pm which fails on older Perls. Minor changes to 02simple.t and 03dbatt.t to fix diagnostics output and help debug DBD which does not handle long data properly. Further changes to Makefile.PL to avoid change in behaviour of ExtUtils::MakeMaker wrt order of execution of PREREQ_PM and CONFIGURE. Now if DBI::DBD is not installed we just warn and exit 0 to avoid a cpan-testers failure. 1.15_2 2007-11-14 Fix bug in DBD::ODBC's private function data_sources which was returning data sources prefixed with "DBI:ODBC" instead of "dbi:ODBC". If you don't have at least DBI 1.21 it is now a fatal error instead of just a warning. DBI->connect changed so informational diagnostics like "Changed database context to 'master'" from SQL Server are available in errstr/state. These don't cause DBI->connect to die but you can test $h->err eq "" after connect and obtain the informational diagnostics from errstr/state if you want them. Fixed problem in 41Unicode.t where utf8 was used before testing we had a recent enough Perl - thank you cpan testers. Changed "our" back to "my" in Makefile.PL - thank you cpan testers. Removed all calls to DBIh_EVENT2 in dbdimp.c as it is no longer used (see posts on dbi-dev). Changed text output when a driver manager is not found to stop referring to iodbcsrc which is no longer included with DBD::ODBC. Changed Makefile.PL to attempt to find unixODBC if -o or ODBCHOME not specified. Updated META.yml based on new 1.2 spec. Changed Makefile.PL so if an ODBC driver manager is not found then we issue warning and exit cleanly but not generating a Makefile. This should stop cpan-testers from flagging a fail because they haven't got an ODBC driver manager. Changed Makefile.PL so it no longer "use"s DBI/DBI::DBD because this makes cpan-testers log a fail if DBI is not installed. Changed to put the DBI::DBD use in the CONFIGURE sub so PREREQ_PM will filter out machines where DBI is not installed. Fix a possible typo, used once in 10handler.t. 1.15_1 2007-11-06 Minor changes to 20SqlServer.t test for SQL Server 2008 (Katmai). Timestamps now return an extra 4 digits of precision (all 0000) and the driver reported in dbcc messages has a '.' in the version which was not handled. New FAQ entry and test code for "Datetime field overflow" problem in Oracle. Changed all ODBC code to use new SQLLEN/SQLULEN types where Microsoft's headers indicate, principally so DBD::ODBC builds and works on win64. NOTE: you will need an ODBC Driver Manager on UNIX which knows SQLLEN/SQLULEN types. The unixODBC driver manager uses SQLLEN/SQLULEN in versions from at least 2.2.12. Thanks to Nelson Oliveira for finding, patching and testing this and then fixing problems with bound parameters on 64 bit Windows. 
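A minimal sketch of picking up the informational connect diagnostics described in the 1.15_2 entry above (an empty-string err marks success-with-information in DBI); the DSN and credentials are invented:

  use DBI;
  my $dbh = DBI->connect('dbi:ODBC:DSN=mydsn', 'user', 'password');
  if (defined $dbh->err && $dbh->err eq '') {
      warn 'connect info: ', $dbh->errstr, "\n";  # e.g. "Changed database context to 'master'"
  }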
Added private_attribute_info method DBI introduced (see DBI docs) and test cases to 02simple.t. Fairly major changes to dbd_describe in dbdimp.c to reduce ODBC calls by 1 SQLDescribeCol call per column when describing result sets. Instead of calculating the amount of memory required to hold every column name we work on the basis that (num_columns + 1) * SQL_MAX_COLUMN_NAME_LEN can hold all column names. However, to avoid using a large amount of memory unnecessarily if an ODBC driver supports massive column name lengths the maximum size per column is restricted to 256. Changed to avoid using explicit use of DBIc_ERRXXX in favour of newish (ok, DBD::ODBC is a bit behind the times in this respect) DBIh_SET_ERR_CHAR. This involved a reworking or the error handling and although all test cases still pass I cannot guarantee it has no other effects - please let me know if you spot differences in error messages. Fixed bug in 20SqlServer test for multiple results that was passing but for the wrong reason (namely, that the odbc_err_handler was being called when it should not have been). Fixed bug in odbc_err_handler that prevented it from being reset so you don't have an error handler. Looks like the problem was in dbd_db_STORE_attrib where "if(valuesv == &PL_sv_undef)" was used to detect undef and I think it should be "if (!SvOK(valuesv))". Improvements to odbc_err_handler documentation. Added 10handler.t test cases. More tests in 02simple.t to check NUM_OF_FIELDS and NAMES_uc. Bit of a tidy up: Removed some unused variable declarations from dbdimp.c. Lots of changes to DBD::ODBC tracing, particularly in dbd_describe, and dbd_error2 and login6. Removed a lot of tracing code in comments or #if 0 as it never gets built. Changed dual tests on SQL_SUCCESS and SQL_SUCCESS_WITH_INFO to use SQL_SUCCEEDED. 1.14 2007-07-17 Fix bug reported where ping crashes after disconnect thanks to Steffen Goeldner. Fix bug in dbd_bind_ph which leads to the error Can't change param 1 maxlen (51->50) after first bind in the 20SqlServer test. This is caused by svGROW in Perl 5.8.8 being changed to possibly grow by more than you asked (e.g. up to the next longword boundary). Fix problem with binding undef as an output parameter. Reported by Stephen More with IBM's ODBC driver for iSeries. Removed comment delimiters in comments in dbdimp.h leading to warnings. Removed some unused variable declarations leading to warnings. Removed PerlIO_flush calls as it is believed they are not required. Add logging for whether SQLDescribeParam is supported. Fixed use of unitialised variable in dbd_bind_ph where an undef is bound and tracing is enabled. Fixed issue with TRACESTATUS change in 20SqlServer.t tests 28, 31, 32 and 33 leading to those tests failing when testing with SQL Server 2005 or Express. Many compiler warnings fixed - especially for incompatible types. Add provisional Unicode support - thanks to Alexander Foken. This change is very experimental (especially on UNIX). Please see ODBC.pm documentation. Also see README.unicode and README.af. New database attribute odbc_has_unicode to test if DBD::ODBC was built with UNICODE support. New tests for Unicode. New requirement for Perl 5.8.1 if Unicode support required. New -[no]u argument to Makefile.PL. New warning in Makefile.PL if Unicode support built for UNIX. Fix use of unitialised var in Makefile.PL. Fix use of scalar with no effect on Makefile.PL Added warning to Makefile.PL about building/running with LANG using UTF8. 
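A minimal sketch of the private_attribute_info method mentioned in the entry above; it simply lists the driver-private attribute names DBD::ODBC reports:

  # assumes an existing DBD::ODBC database handle in $dbh
  my $attrs = $dbh->private_attribute_info;  # hash reference keyed by attribute name
  print "$_\n" for sort keys %$attrs;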
Added warning to Makefile.PL about using thread-safe ODBC drivers.
Updated MANIFEST to include more test code from mytest and remove MANIFEST.SKIP etc.
Removed calls to get ODBC errors when SQLMoreResults returns SQL_NO_DATA. These are a waste of time since SQL_NO_DATA is expected and there is no error diagnostic to retrieve.
Changes to test 17 of 02simple.t which got "not ok 17 - Col count matches correct col count" errors with some Postgres ODBC drivers. Caused by the test expecting column names to come back uppercase. Fixed by uppercasing the returned column names.
Changes to tests in 03dbatt.t which correctly expect an ODBC 3 driver to return the column names in the SQLTables result-set as per the ODBC 3.0 spec. Postgres, which reports itself as an ODBC 3.0 driver, seems to return the ODBC 2 defined column names. Changed tests to catch ODBC 2.0 names and pass the test but issue a warning. For Postgres, skip the test (with a warning) which checks $sth->{NAME} returns an empty list after execute on an update.
Updated FAQ, added a few more questions etc.
DBD::ODBC requires at least 5.6.0 of Perl.
Many updates to pod documentation.
Removed some dead HTTP links in the pod I could not find equivalents for - let me know if you have working replacements for ones removed.
Add some HTTP links to useful tutorials on DBD::ODBC.

1.13 2004-11-08
Fix inconsistency/bug with odbc_exec_direct vs. odbc_execdirect settings. Now made consistent with odbc_exec_direct. For now, will still look for odbc_execdirect in prepare, but not as a DBH attribute as a backup (which is what it was doing), but that support will be dropped at some time in the future. Please use odbc_exec_direct from now on...
Fix handling of print statements for SQL Server thanks to Martin Evans! Thanks for all your work on this! Due to a bug in SQL Server, you must use odbc_exec_direct. See t/20SqlServer.t for an example. You will need to call $sth->{odbc_more_results} to clear out any trailing messages.
Change tests to use Test::More. Whew, that's much nicer!
Fix Oracle integral/numeric output params so that a warning is not printed about the value not being numeric (even though it is!)

1.12 2004-10-26
Fix bug with the odbc_execdirect attribute thanks to Martin Evans.
Fix bug(s) with odbc_query_timeout and tested with SQL*Server. Oracle tests failed with setting timeout. Probably not handled by Oracle's ODBC driver.

1.11 2004-10-11
Added odbc_timeout, but untested.

1.10 2004-09-08
Fixed bug in Makefile.PL.
Added pod.t test, taken from DBI.
Fixed various small POD issues, discovered during the pod test.
Fixed bug in bind_param_inout.

1.09 2004-03-10
Duh. I forgot to add new dbivport.h to MANIFEST and SVN before submitting. Fixed.

1.08 2004-03-06
Added check in Makefile.PL to detect if the environment variable LANG is set. If so, prints a warning about potential makefile generation issues.
Change to use dbivport.h per new DBI spec.
Add ability to set the cursor type during the connect. This may allow some servers which do not support multiple concurrent statements to permit them -- tested with SQL Server. Thanks to Martin Busik! See odbc_cursortype information in the ODBC POD.

1.07 2004-02-19
Added to Subversion version control hosted by perl.org. Thanks Robert! See ODBC.pm POD for more information.
Added contributing section to ODBC.pm POD -- see more details there!
Added parameter to odbc_err_handler for the NativeError -- thanks to Martin Busik.
Fix for Makefile.PL not having tab in front of $(NOOP) (Finally).
Fix for SQLForeignKeys thanks to Kevin Shepherd.
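A minimal sketch of the odbc_exec_direct/odbc_more_results usage described in the 1.13 entry above; the procedure name is invented, and the loop simply drains every result set (and any trailing messages):

  # assumes an existing DBD::ODBC database handle in $dbh
  my $sth = $dbh->prepare('{call my_proc}', { odbc_exec_direct => 1 });
  $sth->execute;
  do {
      if ($sth->{NUM_OF_FIELDS}) {
          while (my @row = $sth->fetchrow_array) { print "@row\n"; }
      }
  } while ($sth->{odbc_more_results});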
1.06 2003-06-19 Fixed test in t/02simple.t to skip if the DSN defined by the user has DSN= in it. Added tests for wrong DSN, ensuring the DBI::errstr is appropriately set. Fixed small issue in Makefile.PL for Unix systems thanks to H.Merijn Brand. Update to NOT copy user id and password to connect string if UID or PWD parameter in connect string. Updated Makefile.PL for dmake, per patch by Steffen Goldner. Thanks Steffen! 1.05 2003-03-14 Cleaned up Makefile.PL and added Informix support thanks to Jonathan Leffler (see README.informix) Added nicer error message when attempting to do anything while the database is disconnected. Fixed fetchrow_hashref('NAME_uc | NAME_lc') with odbc_more_results. Added exporter to allow perl -MDBD::ODBC=9999 command line to determine version Fixed for building with DBI 1.33 and greater Removed all C++ style comments Ensured files are in Unix format, with the exception of the README type information and Makefile.PL 1.04 2003-01-24 It seems that case insensitive string comparison with a limit causes problems for multiple platforms. strncmpi, strncasecmp, _strcmpin are all functions hit and it seems to be a hit-or-miss. Hence, I rewrote it to upper case the string then do strncmp, which should be safe...sheesh. A simple thing turned into a headache... 1.03 2003-01-17 Add automatic detection of DRIVER= or DSN= to add user id and password to connect string. 1.02 2003-01-06 Fix to call finish() automatically if execute is re-called in a loop (and test in t/02simple.t to ensure it's fixed) Augmented error message when longs are truncated to help users determine where to look for help. Fixes for build under Win32 with Perl5.8. 1.01 2002-12-09 Forgot to fix require DBI 1.201 in ODBC.pm to work for perl 5.8. Fixed 1.00 2002-12-08 (Please see all changes since version 0.43) Updated Makefile.PL to handle SQL_Wxxx types correctly with unixODBC and linking directly with EasySoft OOB. Note that I could not find where iODBC defines SQL_WLONG_VARCHAR, so I'm not sure it's fixed on all other platforms. Should not have been a problem under Win32... Found that the fix in _18 was only enabled if debug enabled and it broke something else. removed the fix. Updated Makefile.PL to use DBI version 1.21 instead of 1.201 to facilitate builds under latest development versions of Perl. Updated code to use the *greater* of the column display size and the column length for allocating column buffers. This *should* workaround a problem with DBD::ODBC and the Universe database. Added code thanks to Michael Riber to handle SQLExecDirect instead of SQLPrepare. There are two ways to get this: $dbh->prepare($sql, { odbc_execdirect => 1}); and $dbh->{odbc_execdirect} = 1; When $dbh->prepare() is called with the attribute "ExecDirect" set to a non-zero value dbd_st_prepare do NOT call SQLPrepare, but set the sth flag odbc_exec_direct to 1. Fixed numeric value binding when binding non-integral values. Now lets the driver or the database handle the conversion. Fixed makefile.pl generation of makefile to force the ODBC directory first in the include list to help those installing ODBC driver managers on systems which already have ODBC drivers in their standard include path. 0.45_18 2002-09-26 Updated MANIFEST to include more of the mytest/* files (examples, tests) Fixed problem when attempting to get NUM_OF_FIELDS after execute returns no rows/columns. 0.45_17 2002-08-26 More fixes for multiple result sets. 
Needed to clear the DBIc_FIELDS_AV when re-executing the multiple-result set stored procedure/query. 0.45_16 2002-08-26 Updated to fix output parameters with multiple result sets. The output parameters are not set until the last result set has been retrieved. 0.45_15 2002-08-20 Updated for new DBIc_STATE macros (all debug, as it turned out) to be thread safer in the long run Updated for the new DBIc_LOGFP macros Added CLONE method Fix for SQL Server where multiple result sets being returned from a stored proc, where one of the result sets was empty (insert/update). Added new attribute odbc_force_rebind, which forces DBD::ODBC to check recheck for new result sets every execute. This is only really necessary if you have a stored procedure which returns different result sets with each execute, given the same "prepare". Many times this will be automatically set by DBD::ODBC, however, if there is only one result set in the stored proc, but it can differ with each call, then DBD::ODBC will not know to set it. Updated the DBD::ODBC POD documentation to document DBD::ODBC private attributes and usage. 0.45_14 2002-08-13 Added support to handle (better) DBI begin_work(). Fix for binding undef parameters on SQL Server. Fix bug when connecting twice in the same script. Trying to set the environment ODBC version twice against the same henv caused an error. 0.45_13 2002-08-09 Workaround problem with iODBC where SQLAllocHandleStd is not present in iODBC. Made Changes file accessible via perldoc DBD::ODBC::Changes. In the near future the change log will be removed from here and put in changes to tidy up a bit. 0.45_12 2002-08-09 Fixed global destruction access violation (which was seemingly random). 0.45_11 2002-08-08 Updated manifest to include more samples. Working on checking for leaks on Linux, where I might get more information about the process memory. Working on fixing problems with MS SQL Server binding parameters. It seems that SQLServer gets "confused" if you bind a NULL first. In "older" (SQLServer 2000 initial release) versions of the driver, it would truncate char fields. In "newer" versions of the SQL Server driver, it seems to only truncate dates (actually, round them to the nearest minute). If you have problems in the SQL Server tests, please upgrade your driver to the latest version on Microsoft's website (MDAC 2.7 or above) http://www.microsoft.com/data 0.45_10 2002-07-30 Added database specific tests to ensure things are working. Some of the tests may not work for all people or may not be desirable. I have tried to keep them as safe as possible, but if they don't work, please let me know. Added support for the internal function GetFunctions to handle ODBC 3's SQL_API_ODBC3_ALL_FUNCTIONS. Would have caused a memory overwrite on the stack if it was called. 0.45_9 2002-07-30 Fixed bug in procedure handling for SQLServer. Was not re-describing the result sets if the SQLMoreResults in the execute needs to be called. 0.45_8 2002-07-25 Fixed bug in tracing code when binding an undef parameter which did not happen to have a valid buffer with tracing level >= 2 Fixed bug when binding undef after a valid data bind on a timestamp. The Scale value was being calculated based upon the string that had been bound prior to the bind of the undef and if that had a sub-second value, then the scale would be set to the wrong value...I.e. 
  bind_param(1, '2000-05-17 00:01:00.250', SQL_TYPE_TIMESTAMP) then execute
  bind_param(1, undef, SQL_TYPE_TIMESTAMP) then execute
Fixed SQL Server issue when binding a null and the length was set to 0 instead of 1.

0.45_7 2002-07-25
Adding support for array binding, but not finished.
Fixed bug where SqlServer stored procedures which perform an INSERT would not correctly return a result set. Thanks to Joe Tebelskis for finding it and Martin Evans for supplying a fix.
Fixed bug where binding the empty string would cause a problem. Fixed and added test in t/07bind.t.

0.45_6 2002-07-24
Added support for new DBI ParamValues feature.

0.45_5 2002-07-23
Added odbc_err_handler and odbc_async_exec thanks to patches by David L. Good. See example in mytest/testerrhandler.pl
Here are the notes about it:
  I've implemented two separate functions. The first is an "error handler" similar to that in DBD::Sybase. The error handler can be used to intercept error and status messages from the server. It is the only way (at least currently) that you can retrieve non-error status messages when execution is successful.
  To use the error handler, set the "odbc_err_handler" attribute on your database handle to a reference to a subroutine that will act as the error handler. This subroutine will be passed two args, the SQLSTATE and the error message. If the subroutine returns 0, the error message will be ignored. If it returns non-zero, the error message will be processed normally.
  The second function implemented is asynchronous execution. It's only useful for retrieving server messages with an error handler during an execute() that takes a long time (such as a DBCC on a large database). ODBC doesn't have the concept of a callback routine like Sybase's DBlib/CTlib does, so asynchronous execution is needed to be able to get the server messages before the SQL statement is done processing.
  To use asynchronous execution, set the "odbc_async_exec" attribute on your database handle to 1. Not all ODBC drivers support asynchronous execution. To see if yours does, set odbc_async_exec to 1 and then check its value. If the value is 1, your ODBC driver can do asynchronous execution. If the value is 0, your ODBC driver cannot.

0.45_4 2002-07-22
More fixes for DB2 tests and timestamp handling.

0.45_3 2002-07-22
Changes to internal timestamp type handling and test structure to ensure tests work for all platforms. DB2 was giving me fits due to bad assumptions. Thanks to Martin Evans (again) for help in identifying the problems and helping research solutions. This includes the scale/precision values to correctly store full timestamps.

0.45_2 2002-07-19
Moving API usage to ODBC 3.0 specifications. With lots of help from Martin Evans (again!). Thanks Martin!!!!!

0.44 2002-07-18
.44 was never officially released.
Fix for do() and execute to handle DB2 correctly. Patch/discovery thanks to Martin Evans.
Partly moving towards defaulting to ODBC 3.x standards.

0.43 2002-07-18
Fix for FoxPro (and potentially other) Drivers!!!!!
Add support for DBI column_info.
Fix for binding an undef value which comes from dereferencing a hash.
Fix to make all bound columns word (int) aligned in the buffer.

0.42 2002-07-08
Added patches to the tests to support ActiveState's automated build process.
Fix ping() to try SQLTables for a test, instead of a strange query.

0.41 2002-04-15
Fixed problem where SQLDescribeParam would fail (probably bug in ODBC driver). Now reverts to SQL_VARCHAR if that happens, instead of failing the query.
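A minimal sketch of the odbc_err_handler and odbc_async_exec attributes as described in the 0.45_5 entry above (two-argument handler signature per that entry; later releases added a NativeError argument); the handle is assumed to exist:

  # assumes an existing DBD::ODBC database handle in $dbh
  $dbh->{odbc_err_handler} = sub {
      my ($sqlstate, $msg) = @_;
      print "server message: [$sqlstate] $msg\n";
      return 0;                     # 0 = ignore the message, non-zero = process it normally
  };
  $dbh->{odbc_async_exec} = 1;      # read it back afterwards to see if the driver supports async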
Fixed error report when using Oracle's driver. There is a known problem. Left the error on the test, but added warning indicating it's a known problem. 0.40 2002-04-12 Most significant change is the change in the default binding type which forces DBD::ODBC to attempt to determine the bind type if one is not passed. I decided to make this the default behaviour to make things as simple as possible. Fixed connection code put in 0.39 to work correctly. Two minor patches for building, one for Cygwin one if both iODBC and unixODBC libraries are installed. Probably need better command line on this, but if someone has the problem, please let me know (and hopefully send a patch with it). 0.39 2002-03-12 See mytest/longbin.pl for demonstration of inserting and retrieving long binary files to/from the db. Uses MD5 algorithm to verify data. Please do some similar test(s) with your database before using it in production. The various bind types are different for each database! Finally removed distribution of old iODBC. See www.iodbc.org or www.unixodbc.org for newer/better versions of the ODBC driver manager for Unix (and others?). Added ability to force ODBC environment version. Fix to SQLColAttributes. Changes to connect sequence to provide better error messages for those using DSN-less connections. 0.38 2002-02-12 Fixed do function (again) thanks to work by Martin Evans. 0.37 2002-02-10 Patches for get_info where return type is string. Patches thanks to Steffen Goldner. Thanks Steffen! Patched get_info to NOT attempt to get data for SQL_DRIVER_HSTMT and SQL_DRIVER_HDESC as they expect data in and have limited value (IMHO). Further fixed build for ODBC 2.x drivers. The new SQLExecDirect code had SQLAllocHandle which is a 3.x function, not a 2.x function. Sigh. I should have caught that the first time. Signed, the Mad-and- not-thorough-enough-patcher. Additionally, a random core dump occurred in the tests, based upon the new SQLExecDirect code. This has been fixed. 0.36 2002-02-10 Fixed build for ODBC 2.x drivers. The new SQLExecDirect code had SQLFreeHandle which is a 3.x function, not a 2.x function. 0.35 2002-02-09 Fixed (finally) multiple result sets with differing numbers of columns. The final fix was to call SQLFreeStmt(SQL_UNBIND) before repreparing the statement for the next query. Added more to the multi-statement tests to ensure the data retrieved was what was expected. Now, DBD::ODBC overrides DBI's do to call SQLExecDirect for simple statements (those without parameters). Please advise if you run into problems. Hopefully, this will provide some small speed improvement for simple "do" statements. You can also call $dbh->func($stmt, ExecDirect). I'm not sure this has great value unless you need to ensure SQLExecDirect is being called. Patches thanks to Merijn Broeren. Thanks Merijn! 0.34 2002-02-07 Further revamped tests to attempt to determine if SQLDescribeParam will work to handle the binding types. The t/08bind.t attempts to determine if SQLDescribeParam is supported. note that Oracle's ODBC driver under NT doesn't work correctly when binding dates using the ODBC date formatting {d } or {ts }. So, test #3 will fail in t/08bind.t New support for primary_key_info thanks to patches by Martin Evans. New support for catalog, schema, table and table_type in table_info thanks to Martin Evans. Thanks Martin for your work and your continuing testing, suggestions and general support! Support for upcoming dbi get_info. 0.33_3 2002-02-04 Revamped tests to include tests for multiple result sets. 
The tests are ODBC driver platform specific and will be skipped for drivers which do not support multiple result sets.

0.33_2 2002-02-04
Finally tested new binding techniques with SQL Server 2000, but there is a nice little bug in their MDAC and ODBC drivers according to the knowledge base article # Q273813, titled "FIX: "Incorrect Syntax near the Keyword 'by' " Error Message with Column Names of "C", "CA" or "CAS" (Q273813). DBD::ODBC now does not name any of the columns A, B, C, or D; they are now COL_A, COL_B, COL_C, COL_D.
*** NOTE: *** I AM STRONGLY CONSIDERING MAKING THE NEW BINDING the default for future versions. I do not believe it will break much existing code (if any) as anyone binding to non VARCHAR (without the ODBC driver doing a good conversion from the VARCHAR) will have a problem. It may be subtle, however, since much code will work, but, say, binding dates may not with some drivers. Please comment soon...

0.33_1 2002-02-04
*** WARNING: ***
Changes to the binding code to allow the use of SQLDescribeParam to determine the type of column being bound. This is experimental and activated by setting
  $dbh->{odbc_default_bind_type} = 0; # before creating the query...
Currently the default value of odbc_default_bind_type = SQL_VARCHAR which mimics the current behaviour. If you set odbc_default_bind_type to 0, then SQLDescribeParam will be called to determine the column type. Not ALL databases handle this correctly. For example, Oracle returns SQL_VARCHAR for all types and attempts to convert to the correct type for us. However, if you use the ODBC escaped date/time format such as {ts '1998-05-13 00:01:00'} then Oracle complains. If you bind this with a SQL_TIMESTAMP type, however, Oracle's ODBC driver will parse the time/date correctly. Use at your own risk!
Fix to dbdimp.c to allow quoted identifiers to begin/end with either " or '. The following will not be treated as if they have a bind placeholder:
  "isEstimated?"
  '01-JAN-1987 00:00:00'
  'Does anyone insert a ?'

0.32 2002-01-22
More SAP patches to Makefile.PL to eliminate the call to Data Sources.
A patch to the test (for SAP and potentially others), to allow fallback to SQL_TYPE_DATE in the tests.

0.31 2002-01-18
Added SAP patches to build directly against SAP driver instead of driver manager thanks to Flemming Frandsen (thanks!)
Added support to fix ping for Oracle8. May break other databases, so please report this as soon as possible. The downside is that we need to actually execute the dummy query.

0.30 2002-01-08
Added ping patch for Solid courtesy of Marko Asplund.
Updated disconnect to rollback if autocommit is not on. This should silence some errors when disconnecting.
Updated SQL_ROWSET_SIZE attribute. Needed to force it to odbc_SQL_ROWSET_SIZE to obey the DBI rules.
Added odbc_SQL_DRIVER_ODBC_VER, which obtains the version of the Driver upon connect. This internal capture of the version is a read-only attribute and is used during array binding of parameters.
Added odbc_ignore_named_placeholders attribute to facilitate creating triggers within SAPDB and Oracle, to name two. The syntax in these DBs is to allow use of :old and :new to access column values before and after updates. Example:
  $dbh->{odbc_ignore_named_placeholders} = 1; # set it for all future statements
  # ignores :foo, :new, etc, but not :1 or ?
  $dbh->do("create or replace etc :new.D = sysdate etc");

0.29 2001-08-22
Cygwin patches from Neil Lunn (untested by me). Thanks Neil!
SQL_ROWSET_SIZE attribute patch from Andrew Brown There are only 2 additional lines allowing for the setting of SQL_ROWSET_SIZE as db handle option. The purpose to my madness is simple. SqlServer (7 anyway) by default supports only one select statement at once (using std ODBC cursors). According to the SqlServer documentation you can alter the default setting of three values to force the use of server cursors - in which case multiple selects are possible. The code change allows for: $dbh->{SQL_ROWSET_SIZE} = 2; # Any value > 1 For this very purpose. The setting of SQL_ROWSET_SIZE only affects the extended fetch command as far as I can work out and thus setting this option shouldn't affect DBD::ODBC operations directly in any way. Andrew VMS and other patches from Martin Evans (thanks!) [1] a fix for Makefile.PL to build DBD::ODBC on OpenVMS. [2] fix trace message coredumping after SQLDriverConnect [3] fix call to SQLCancel which fails to pass the statement handle properly. [4] consume diagnostics after SQLDriverConnect/SQLConnect call or they remain until the next error occurs and it then looks confusing (this is due to ODBC spec for SQLError). e.g. test 02simple returns a data truncated error only now instead of all the informational diags that are left from the connect call, like the "database changed", "language changed" messages you get from MS SQL Server. Replaced C++ style comments with C style to support more platforms more easily. Fixed bug which use the single quote (') instead of a double quote (") for "literal" column names. This helped when having a colon (:) in the column name. Fixed bug which would cause DBD::ODBC to core-dump (crash) if DBI tracing level was greater than 3. Fixed problem where ODBC.pm would have "use of uninitialized variable" if calling DBI's type_info. Fixed problem where ODBC.xs *may* have an overrun when calling SQLDataSources. Fixed problem with DBI 1.14, where fprintf was being called instead of PerlIO_printf for debug information Fixed problem building with unixODBC per patch from Nick Gorham Added ability to bind_param_inout() via patches from Jeremy Cooper. Haven't figured out a good, non-db specific way to test. My current test platform attempts to determine the connected database type via ugly hacks and will test, if it thinks it can. Feel free to patch and send me something...Also, my current Oracle ODBC driver fails miserably and dies. Updated t/02simple.t to not print an error, when there is not one. 0.28 2000-03-23 Added support for SQLSpecialColumns thanks to patch provided by Martin J. Evans Fixed bug introduced in 0.26 which was introduced of SQLMoreResults was not supported by the driver. 0.27 2000-03-08 Examined patch for ping method to repair problem reported by Chris Bezil. Thanks Chris! Added simple test for ping method working which should identify this in the future. 0.26 2000-03-05 Put in patch for returning only positive rowcounts from dbd_st_execute. The original patch was submitted by Jon Smirl and put back in by David Good. Reasoning seems sound, so I put it back in. However, any databases that return negative rowcounts for specific reasons, will no longer do so. Put in David Good's patch for multiple result sets. Thanks David! See mytest\moreresults.pl for an example of usage. Added readme.txt in iodbcsrc explaining an issue there with iODBC 2.50.3 and C. Put in rudimentary cancel support via SQLCancel. Call $sth->cencel to utilize. However, it is largely untested by me, as I do not have a good sample for this yet. 
It may come in handy with threaded perl, someday or it may work in a signal handler. 0.25 2000-03-04 Added conditional compilation for SQL_WVARCHAR and SQL_WLONGVARCHAR. If they are not defined by your driver manager, they will not be compiled in to the code. If you would like to support these types on some platforms, you may be able to #define SQL_WVARCHAR (-9) #define SQL_WLONGVARCHAR (-10) Added more long tests with binding in t\09bind.t. Note use of bind_param! 0.24 2000-02-24 Fixed Test #13 in 02simple.t. Would fail, improperly, if there was only one data source defined. Fixed (hopefully) SQL Server 7 and ntext type "Out of Memory!" errors via patch from Thomas Lowery. Thanks Thomas! Added more support for Solid to handle the fact that it does not support data_sources nor SQLDriverConnect. Patch supplied by Samuli Karkkainen [skarkkai@woods.iki.fi]. Thanks! It's untested by me, however. Added some information from Adam Curtin about a bug in iodbc 2.50.3's data_sources. See iodbcsrc\readme.txt. Added information in this pod from Stephen Arehart regarding DSNLess connections. Added fix for sp_prepare/sp_execute bug reported by Paul G. Weiss. Added some code for handling a hint on disconnect where the user gets an error for not committing. 0.22 1999-09-08 Fixed for threaded perl builds. Note that this was tested only on Win32, with no threads in use and using DBI 1.13. Note, for ActiveState/PERL_OBJECT builds, DBI 1.13_01 is required as of 9/8/99. If you are using ActiveState's perl, this can be installed by using PPM. 0.21 Unknown Thanks to all who provided patches! Added ability to connect to an ODBC source without prior creation of DSN. See mytest/contest.pl for example with MS Access. (Also note that you will need documentation for your ODBC driver -- which, sadly, can be difficult to find). Fixed case sensitivity in tests. Hopefully fixed test #4 in t/09bind.t. Updated it to insert the date column and updated it to find the right type of the column. However, it doesn't seem to work on my Linux test machine, using the OpenLink drivers with MS-SQL Server (6.5). It complains about binding the date time. The same test works under Win32 with SQL Server 6.5, Oracle 8.0.3 and MS Access 97 ODBC drivers. Hmmph. Fixed some binary type issues (patches from Jon Smirl) Added SQLStatistics, SQLForeignKeys, SQLPrimaryKeys (patches from Jon Smirl) Thanks (again), Jon, for providing the build_results function to help reduce duplicate code! Worked on LongTruncOk for Openlink drivers. Note: those trying to bind variables need to remember that you should use the following syntax: use DBI; ... $sth->bind_param(1, $str, DBI::SQL_LONGVARCHAR); Added support for unixodbc (per Nick Gorham) Added support for OpenLinks udbc (per Patrick van Kleef) Added Support for esodbc (per Martin Evans) Added Support for Easysoft (per Bob Kline) Changed table_info to produce a list of views, too. Fixed bug in SQLColumns call. Fixed blob handling via patches from Jochen Wiedmann. Added data_sources capability via snarfing code from DBD::Adabas (Jochen Wiedmann) 0.20 1998-08-14 SQLColAttributes fixes for SQL Server and MySQL. Fixed tables method by renaming to new table_info method. Added new tyoe_info_all method. Improved Makefile.PL support for Adabase. 0.19 Unknown Added iODBC source code to distribution.Fall-back to using iODBC header files in some cases. 0.18 Unknown Enhancements to build process. Better handling of errors in error handling code. 0.17 Unknown This release is mostly due to the good work of Jeff Urlwin. 
My eternal thanks to you Jeff. Fixed "SQLNumResultCols err" on joins and 'order by' with some drivers (see Microsoft Knowledge Base article #Q124899). Thanks to Paul O'Fallon for that one. Added more (probably incomplete) support for unix ODBC in Makefile.PL Increased default SQL_COLUMN_DISPLAY_SIZE and SQL_COLUMN_LENGTH to 2000 for drivers that don't provide a way to query them dynamically. Was 100! When fetch reaches the end-of-data it automatically frees the internal ODBC statement handle and marks the DBI statement handle as inactive (thus an explicit 'finish' is *not* required). Also: LongTruncOk for Oracle ODBC (where fbh->datalen < 0) Added tracing into SQLBindParameter (help diagnose oracle odbc bug) Fixed/worked around bug/result from Latest Oracle ODBC driver where in SQLColAttribute cbInfoValue was changed to 0 to indicate fDesc had a value Added work around for compiling w/ActiveState PRK (PERL_OBJECT) Updated tests to include date insert and type Added more "backup" SQL_xxx types for tests Updated bind test to test binding select NOTE: bind insert fails on Paradox driver (don't know why) Added support for: (see notes below) SQLGetInfo via $dbh->func(xxx, GetInfo) SQLGetTypeInfo via $dbh->func(xxx, GetTypeInfo) SQLDescribeCol via $sth->func(colno, DescribeCol) SQLColAttributes via $sth->func(xxx, colno, ColAttributes) SQLGetFunctions via $dbh->func(xxx, GetFunctions) SQLColumns via $dbh->func(catalog, schema, table, column, 'columns') Fixed $DBI::err to reflect the real ODBC error code which is a 5 char code, not necessarily numeric. Fixed fetches when LongTruncOk == 1. Updated tests to pass more often (hopefully 100% ) Updated tests to test long reading, inserting and the LongTruncOk attribute. Updated tests to be less driver specific. They now rely upon SQLGetTypeInfo I in order to create the tables. The test use this function to "ask" the driver for the name of the SQL type to correctly create long, varchar, etc types. For example, in Oracle the SQL_VARCHAR type is VARCHAR2, while MS Access uses TEXT for the SQL Name. Again, in Oracle the SQL_LONGVARCHAR is LONG, while in Access it's MEMO. The tests currently handle this correctly (at least with Access and Oracle, MS SQL server will be tested also). DBD-ODBC-1.61/ODBC.pm0000644000175000017500000032072013614567601013167 0ustar martinmartin# # Copyright (c) 1994,1995,1996,1998 Tim Bunce # portions Copyright (c) 1997-2004 Jeff Urlwin # portions Copyright (c) 1997 Thomas K. Wenrich # portions Copyright (c) 2007-2014 Martin J. Evans # # You may distribute under the terms of either the GNU General Public # License or the Artistic License, as specified in the Perl README file. ## no critic (ProhibitManyArgs ProhibitMultiplePackages) require 5.008; # NOTE: Don't forget to update the version reference in the POD below too. # NOTE: If you create a developer release x.y_z ensure y is greater than # the preceding y in the non developer release e.g., 1.24 should be followed # by 1.25_1 and then released as 1.26. 
# see discussion on dbi-users at # http://www.nntp.perl.org/group/perl.dbi.dev/2010/07/msg6096.html and # http://www.dagolden.com/index.php/369/version-numbers-should-be-boring/ $DBD::ODBC::VERSION = '1.61'; { ## no critic (ProhibitMagicNumbers ProhibitExplicitISA) ## no critic (ProhibitPackageVars) package DBD::ODBC; use DBI (); use DynaLoader (); use Exporter (); @ISA = qw(Exporter DynaLoader); # my $Revision = substr(q$Id$, 13,2); require_version DBI 1.609; bootstrap DBD::ODBC $VERSION; $err = 0; # holds error code for DBI::err $errstr = q{}; # holds error string for DBI::errstr $sqlstate = "00000"; # starting state $drh = undef; # holds driver handle once initialised use constant { # header fields in SQLGetDiagField: SQL_DIAG_CURSOR_ROW_COUNT => -1249, SQL_DIAG_DYNAMIC_FUNCTION => 7, SQL_DIAG_DYNAMIC_FUNCTION_CODE => 12, SQL_DIAG_NUMBER => 2, SQL_DIAG_RETURNCODE => 1, SQL_DIAG_ROW_COUNT => 3, # record fields in SQLGetDiagField: SQL_DIAG_CLASS_ORIGIN => 8, SQL_DIAG_COLUMN_NUMBER => -1247, SQL_DIAG_CONNECTION_NAME => 10, SQL_DIAG_MESSAGE_TEXT => 6, SQL_DIAG_NATIVE => 5, SQL_DIAG_ROW_NUMBER => -1248, SQL_DIAG_SERVER_NAME => 11, SQL_DIAG_SQLSTATE => 4, SQL_DIAG_SUBCLASS_ORIGIN => 9, # TAF constants - these are just copies of Oracle constants # events: OCI_FO_END => 0x00000001, OCI_FO_ABORT => 0x00000002, OCI_FO_REAUTH => 0x00000004, OCI_FO_BEGIN => 0x00000008, OCI_FO_ERROR => 0x00000010, # callback return codes: OCI_FO_RETRY => 25410, # types: OCI_FO_NONE => 0x00000001, OCI_FO_SESSION => 0x00000002, OCI_FO_SELECT => 0x00000004, OCI_FO_TXNAL => 0x00000008 }; our @EXPORT_DIAGS = qw(SQL_DIAG_CURSOR_ROW_COUNT SQL_DIAG_DYNAMIC_FUNCTION SQL_DIAG_DYNAMIC_FUNCTION_CODE SQL_DIAG_NUMBER SQL_DIAG_RETURNCODE SQL_DIAG_ROW_COUNT SQL_DIAG_CLASS_ORIGIN SQL_DIAG_COLUMN_NUMBER SQL_DIAG_CONNECTION_NAME SQL_DIAG_MESSAGE_TEXT SQL_DIAG_NATIVE SQL_DIAG_ROW_NUMBER SQL_DIAG_SERVER_NAME SQL_DIAG_SQLSTATE SQL_DIAG_SUBCLASS_ORIGIN); our @EXPORT_TAF = qw(OCI_FO_END OCI_FO_ABORT OCI_FO_REAUTH OCI_FO_BEGIN OCI_FO_ERROR OCI_FO_RETRY OCI_FO_NONE OCI_FO_SESSION OCI_FO_SELECT OCI_FO_TXNAL); our @EXPORT_OK = (@EXPORT_DIAGS, @EXPORT_TAF); our %EXPORT_TAGS = ( diags => \@EXPORT_DIAGS, taf => \@EXPORT_TAF); sub parse_trace_flag { my ($class, $name) = @_; return 0x02_00_00_00 if $name eq 'odbcunicode'; return 0x04_00_00_00 if $name eq 'odbcconnection'; return DBI::parse_trace_flag($class, $name); } sub parse_trace_flags { my ($class, $flags) = @_; return DBI::parse_trace_flags($class, $flags); } my $methods_are_installed = 0; sub driver{ return $drh if $drh; my($class, $attr) = @_; $class .= "::dr"; # not a 'my' since we use it above to prevent multiple drivers $drh = DBI::_new_drh($class, { 'Name' => 'ODBC', 'Version' => $VERSION, 'Err' => \$DBD::ODBC::err, 'Errstr' => \$DBD::ODBC::errstr, 'State' => \$DBD::ODBC::sqlstate, 'Attribution' => 'DBD::ODBC by Jeff Urlwin, Tim Bunce and Martin J. 
Evans', }); if (!$methods_are_installed) { DBD::ODBC::st->install_method("odbc_lob_read"); DBD::ODBC::st->install_method("odbc_rows", { O=>0x00000000 }); DBD::ODBC::st->install_method("odbc_describe_param", { O=>0x00000000 }); # don't clear errors - IMA_KEEP_ERR = 0x00000004 DBD::ODBC::st->install_method("odbc_getdiagrec", { O=>0x00000004 }); DBD::ODBC::db->install_method("odbc_getdiagrec", { O=>0x00000004 }); DBD::ODBC::db->install_method("odbc_getdiagfield", { O=>0x00000004 }); DBD::ODBC::st->install_method("odbc_getdiagfield", { O=>0x00000004 }); $methods_are_installed++; } return $drh; } sub CLONE { undef $drh } 1; } { package DBD::ODBC::dr; # ====== DRIVER ====== use strict; use warnings; ## no critic (ProhibitBuiltinHomonyms) sub connect { my($drh, $dbname, $user, $auth, $attr)= @_; #$user = q{} unless defined $user; #$auth = q{} unless defined $auth; # create a 'blank' dbh my $this = DBI::_new_dbh($drh, { 'Name' => $dbname, 'USER' => $user, 'CURRENT_USER' => $user, }); # Call ODBC _login func in Driver.xst file => dbd_db_login6 # and populate internal handle data. # There are 3 versions (currently) if you have a recent DBI: # dbd_db_login (oldest) # dbd_db_login6 (with attribs hash & char * args) and # dbd_db_login6_sv (as dbd_db_login6 with perl scalar args DBD::ODBC::db::_login($this, $dbname, $user, $auth, $attr) or return; return $this; } ## use critic sub data_sources { my ($drh, $attr) = @_; my $dsref = DBD::ODBC::dr::_data_sources( $drh, $attr ); if( defined( $dsref ) && ref( $dsref ) eq "ARRAY" ) { return @$dsref; } return (); # Return empty array } } { package DBD::ODBC::db; # ====== DATABASE ====== use strict; use warnings; use constant SQL_DRIVER_HSTMT => 5; use constant SQL_DRIVER_HLIB => 76; use constant SQL_DRIVER_HDESC => 135; sub parse_trace_flag { my ($h, $name) = @_; return DBD::ODBC->parse_trace_flag($name); } sub private_attribute_info { return { odbc_ignore_named_placeholders => undef, # sth and dbh odbc_default_bind_type => undef, # sth and dbh odbc_force_bind_type => undef, # sth and dbh odbc_force_rebind => undef, # sth and dbh odbc_async_exec => undef, # sth and dbh odbc_exec_direct => undef, odbc_describe_parameters => undef, odbc_SQL_ROWSET_SIZE => undef, odbc_SQL_DRIVER_ODBC_VER => undef, odbc_cursortype => undef, odbc_query_timeout => undef, # sth and dbh odbc_has_unicode => undef, odbc_out_connect_string => undef, odbc_version => undef, odbc_err_handler => undef, odbc_putdata_start => undef, # sth and dbh odbc_column_display_size => undef, # sth and dbh odbc_utf8_on => undef, # sth and dbh odbc_driver_complete => undef, odbc_batch_size => undef, odbc_array_operations => undef, # sth and dbh odbc_taf_callback => undef, odbc_trace => undef, # dbh odbc_trace_file => undef, # dbh }; } sub prepare { my($dbh, $statement, @attribs)= @_; # create a 'blank' sth my $sth = DBI::_new_sth($dbh, { 'Statement' => $statement, }); # Call ODBC func in ODBC.xs file. # (This will actually also call SQLPrepare for you.) # and populate internal handle data. 
DBD::ODBC::st::_prepare($sth, $statement, @attribs) or return; return $sth; } sub column_info { my ($dbh, $catalog, $schema, $table, $column) = @_; $catalog = q{} if (!$catalog); $schema = q{} if (!$schema); $table = q{} if (!$table); $column = q{} if (!$column); # create a "blank" statement handle my $sth = DBI::_new_sth($dbh, { 'Statement' => "SQLColumns" }); _columns($dbh,$sth, $catalog, $schema, $table, $column) or return; return $sth; } sub columns { my ($dbh, $catalog, $schema, $table, $column) = @_; $catalog = q{} if (!$catalog); $schema = q{} if (!$schema); $table = q{} if (!$table); $column = q{} if (!$column); # create a "blank" statement handle my $sth = DBI::_new_sth($dbh, { 'Statement' => "SQLColumns" }); _columns($dbh,$sth, $catalog, $schema, $table, $column) or return; return $sth; } sub table_info { my ($dbh, $catalog, $schema, $table, $type) = @_; if ($#_ == 1) { my $attrs = $_[1]; $catalog = $attrs->{TABLE_CAT}; $schema = $attrs->{TABLE_SCHEM}; $table = $attrs->{TABLE_NAME}; $type = $attrs->{TABLE_TYPE}; } # the following was causing a problem # changing undef to '' makes a big difference to SQLTables # as SQLTables has special cases for empty string calls #$catalog = q{} if (!$catalog); #$schema = q{} if (!$schema); #$table = q{} if (!$table); #$type = q{} if (!$type); # create a "blank" statement handle my $sth = DBI::_new_sth($dbh, { 'Statement' => "SQLTables" }); DBD::ODBC::st::_tables($dbh,$sth, $catalog, $schema, $table, $type) or return; return $sth; } sub primary_key_info { my ($dbh, $catalog, $schema, $table ) = @_; # create a "blank" statement handle my $sth = DBI::_new_sth($dbh, { 'Statement' => "SQLPrimaryKeys" }); $catalog = q{} if (!$catalog); $schema = q{} if (!$schema); $table = q{} if (!$table); DBD::ODBC::st::_primary_keys($dbh,$sth, $catalog, $schema, $table ) or return; return $sth; } sub statistics_info { my ($dbh, $catalog, $schema, $table, $unique, $quick ) = @_; # create a "blank" statement handle my $sth = DBI::_new_sth($dbh, { 'Statement' => "SQLStatistics" }); $catalog = q{} if (!$catalog); $schema = q{} if (!$schema); $table = q{} if (!$table); $unique = 1 if (!$unique); $quick = 1 if (!$quick); DBD::ODBC::st::_statistics($dbh, $sth, $catalog, $schema, $table, $unique, $quick) or return; return $sth; } sub foreign_key_info { my ($dbh, $pkcatalog, $pkschema, $pktable, $fkcatalog, $fkschema, $fktable ) = @_; # create a "blank" statement handle my $sth = DBI::_new_sth($dbh, { 'Statement' => "SQLForeignKeys" }); $pkcatalog = q{} if (!$pkcatalog); $pkschema = q{} if (!$pkschema); $pktable = q{} if (!$pktable); $fkcatalog = q{} if (!$fkcatalog); $fkschema = q{} if (!$fkschema); $fktable = q{} if (!$fktable); _GetForeignKeys($dbh, $sth, $pkcatalog, $pkschema, $pktable, $fkcatalog, $fkschema, $fktable) or return; return $sth; } sub ping { my $dbh = shift; # DBD::Gofer does the following (with a 0 instead of "0") but it I # cannot make it set a warning. 
#return $dbh->SUPER::set_err("0", "can't ping while not connected") # warning # unless $dbh->SUPER::FETCH('Active'); #my $pe = $dbh->FETCH('PrintError'); #$dbh->STORE('PrintError', 0); my $evalret = eval { # create a "blank" statement handle my $sth = DBI::_new_sth($dbh, { 'Statement' => "SQLTables_PING" }) or return 1; my ($catalog, $schema, $table, $type); $catalog = q{}; $schema = q{}; $table = 'NOXXTABLE'; $type = q{}; DBD::ODBC::st::_tables($dbh,$sth, $catalog, $schema, $table, $type) or return 1; $sth->finish; return 0; }; #$dbh->STORE('PrintError', $pe); $dbh->set_err(undef,'',''); # clear any stored error from eval above if ($evalret == 0) { return 1; } else { return 0; } } ##### # saved, just for posterity. ##### sub oldping { ##### my $dbh = shift; ##### my $state = undef; ##### ##### # should never 'work' but if it does, that's okay! ##### # JLU incorporated patches from Jon Smirl 5/4/99 ##### { ##### local $dbh->{RaiseError} = 0 if $dbh->{RaiseError}; ##### # JLU added local PrintError handling for completeness. ##### # it shouldn't print, I think. ##### local $dbh->{PrintError} = 0 if $dbh->{PrintError}; ##### my $sql = "select sysdate from dual1__NOT_FOUND__CANNOT"; ##### my $sth = $dbh->prepare($sql); ##### # fixed "my" $state = below. Was causing problem with ##### # ping! Also, fetching fields as some drivers (Oracle 8) ##### # may not actually check the database for activity until ##### # the query is "described". ##### # Right now, Oracle8 is the only known version which ##### # does not actually check the server during prepare. ##### my $ok = $sth && $sth->execute(); ##### ##### $state = $dbh->state; ##### $DBD::ODBC::err = 0; ##### $DBD::ODBC::errstr = ""; ##### $DBD::ODBC::sqlstate = "00000"; ##### return 1 if $ok; ##### } ##### return 1 if $state eq 'S0002'; # Base table not found ##### return 1 if $state eq '42S02'; # Base table not found.Solid EE v3.51 ##### return 1 if $state eq 'S0022'; # Column not found ##### return 1 if $state eq '37000'; # statement could not be prepared (19991011, JLU) ##### # return 1 if $state eq 'S1000'; # General Error? ? 5/30/02, JLU. This is what Openlink is returning ##### # We assume that any other error means the database ##### # is no longer connected. ##### # Some special cases may need to be added to the code above. ##### return 0; ##### } # New support for DBI which has the get_info command. # leaving support for ->func(xxx, GetInfo) (below) for a period of time # to support older applications which used this. sub get_info { my ($dbh, $item) = @_; # Ignore some we cannot do if ($item == SQL_DRIVER_HSTMT || $item == SQL_DRIVER_HLIB || $item == SQL_DRIVER_HDESC) { return; } return _GetInfo($dbh, $item); } # new override of do method provided by Merijn Broeren # this optimizes "do" to use SQLExecDirect for simple # do statements without parameters. ## no critic (ProhibitBuiltinHomonyms) sub do { my($dbh, $statement, $attr, @params) = @_; my $rows = 0; ## no critic (ProhibitMagicNumbers) if( -1 == $#params ) { $dbh->STORE(Statement => $statement); # No parameters, use execute immediate $rows = ExecDirect( $dbh, $statement ); if( 0 == $rows ) { $rows = "0E0"; # 0 but true } elsif( $rows < -1 ) { undef $rows; } } else { $rows = $dbh->SUPER::do( $statement, $attr, @params ); } return $rows } ## use critic # # can also be called as $dbh->func($sql, ExecDirect); # if, for some reason, there are compatibility issues # later with DBI's do. 
# sub ExecDirect { my ($dbh, $sql) = @_; return _ExecDirect($dbh, $sql); } # Call the ODBC function SQLGetInfo # Args are: # $dbh - the database handle # $item: the requested item. For example, pass 6 for SQL_DRIVER_NAME # See the ODBC documentation for more information about this call. # sub GetInfo { my ($dbh, $item) = @_; return get_info($dbh, $item); } # Call the ODBC function SQLStatistics # Args are: # See the ODBC documentation for more information about this call. # sub GetStatistics { my ($dbh, $catalog, $schema, $table, $unique) = @_; # create a "blank" statement handle my $sth = DBI::_new_sth($dbh, { 'Statement' => "SQLStatistics" }); _GetStatistics($dbh, $sth, $catalog, $schema, $table, $unique) or return; return $sth; } # Call the ODBC function SQLForeignKeys # Args are: # See the ODBC documentation for more information about this call. # sub GetForeignKeys { my ($dbh, $pk_catalog, $pk_schema, $pk_table, $fk_catalog, $fk_schema, $fk_table) = @_; # create a "blank" statement handle my $sth = DBI::_new_sth($dbh, { 'Statement' => "SQLForeignKeys" }); _GetForeignKeys($dbh, $sth, $pk_catalog, $pk_schema, $pk_table, $fk_catalog, $fk_schema, $fk_table) or return; return $sth; } # Call the ODBC function SQLPrimaryKeys # Args are: # See the ODBC documentation for more information about this call. # sub GetPrimaryKeys { my ($dbh, $catalog, $schema, $table) = @_; # create a "blank" statement handle my $sth = DBI::_new_sth($dbh, { 'Statement' => "SQLPrimaryKeys" }); _GetPrimaryKeys($dbh, $sth, $catalog, $schema, $table) or return; return $sth; } # Call the ODBC function SQLSpecialColumns # Args are: # See the ODBC documentation for more information about this call. # sub GetSpecialColumns { my ($dbh, $identifier, $catalog, $schema, $table, $scope, $nullable) = @_; # create a "blank" statement handle my $sth = DBI::_new_sth($dbh, { 'Statement' => "SQLSpecialColumns" }); _GetSpecialColumns($dbh, $sth, $identifier, $catalog, $schema, $table, $scope, $nullable) or return; return $sth; } # sub GetTypeInfo { # my ($dbh, $sqltype) = @_; # # create a "blank" statement handle # my $sth = DBI::_new_sth($dbh, { 'Statement' => "SQLGetTypeInfo" }); # # print "SQL Type is $sqltype\n"; # _GetTypeInfo($dbh, $sth, $sqltype) or return; # return $sth; # } sub type_info_all { my ($dbh, $sqltype) = @_; $sqltype = DBI::SQL_ALL_TYPES unless defined $sqltype; my $sth = DBI::_new_sth($dbh, { 'Statement' => "SQLGetTypeInfo" }); _GetTypeInfo($dbh, $sth, $sqltype) or return; my $info = $sth->fetchall_arrayref; unshift @{$info}, { map { ($sth->{NAME}->[$_] => $_) } 0..$sth->{NUM_OF_FIELDS}-1 }; return $info; } } { package DBD::ODBC::st; # ====== STATEMENT ====== use strict; use warnings; *parse_trace_flag = \&DBD::ODBC::db::parse_trace_flag; sub private_attribute_info { return { odbc_ignore_named_placeholders => undef, # sth and dbh odbc_default_bind_type => undef, # sth and dbh odbc_force_bind_type => undef, # sth and dbh odbc_force_rebind => undef, # sth and dbh odbc_async_exec => undef, # sth and dbh odbc_query_timeout => undef, # sth and dbh odbc_putdata_start => undef, # sth and dbh odbc_column_display_size => undef, # sth and dbh odbc_utf8_on => undef, # sth and dbh odbc_exec_direct => undef, # sth and dbh odbc_describe_parameters => undef, # sth and dbh odbc_batch_size => undef, # sth and dbh odbc_array_operations => undef, # sth and dbh }; } sub ColAttributes { # maps to SQLColAttributes my ($sth, $colno, $desctype) = @_; my $tmp = _ColAttributes($sth, $colno, $desctype); return $tmp; } sub cancel { my $sth = 
shift; my $tmp = _Cancel($sth); return $tmp; } sub execute_for_fetch { my ($sth, $fetch_tuple_sub, $tuple_status) = @_; #print "execute_for_fetch\n"; my $row_count = 0; my $tuple_count="0E0"; my $tuple_batch_status; my $batch_size = $sth->FETCH('odbc_batch_size'); $sth->trace_msg("execute_for_fetch($fetch_tuple_sub, " . ($tuple_status ? $tuple_status : 'undef') . ") batch_size = $batch_size\n", 4); # Use DBI's execute_for_fetch if ours is disabled my $override = (defined($ENV{ODBC_DISABLE_ARRAY_OPERATIONS}) ? $ENV{ODBC_DISABLE_ARRAY_OPERATIONS} : -1); if ((($sth->FETCH('odbc_array_operations') == 0) && ($override != 0)) || $override == 1) { $sth->trace_msg("array operations disabled\n", 4); my $sth = shift; return $sth->SUPER::execute_for_fetch(@_); } $tuple_batch_status = [ ]; # we always want this here if (defined($tuple_status)) { @$tuple_status = (); } my $finished; while (1) { my @tuple_batch; for (my $i = 0; $i < $batch_size; $i++) { $finished = $fetch_tuple_sub->(); push @tuple_batch, [ @{$finished || last} ]; } $sth->trace_msg("Found " . scalar(@tuple_batch) . " rows\n", 4); last unless @tuple_batch; my $res = odbc_execute_for_fetch($sth, \@tuple_batch, scalar(@tuple_batch), $tuple_batch_status); $sth->trace_msg("odbc_execute_array returns " . ($res ? $res : 'undef') . "\n", 4); #print "odbc_execute_array XS returned $res\n"; # count how many tuples were used # basically they are all used unless marked UNUSED if ($tuple_batch_status) { foreach (@$tuple_batch_status) { $tuple_count++ unless $_ == 7; # SQL_PARAM_UNUSED next if ref($_); $_ = -1; # we don't know individual row counts } if ($tuple_status) { push @$tuple_status, @$tuple_batch_status if defined($tuple_status); } } if (!defined($res)) { # error $row_count = undef; last; } else { $row_count += $res; } last if !$finished; } if (!wantarray) { return undef if !defined $row_count; return $tuple_count; } return (defined $row_count ? $tuple_count : undef, $row_count); } } 1; __END__ =head1 NAME DBD::ODBC - ODBC Driver for DBI =for html CPAN version =head1 VERSION This documentation refers to DBD::ODBC version 1.61. =head1 WARNING This version of DBD::ODBC contains a significant fix to unicode when inserting into CHAR/VARCHAR columns and it is a change in behaviour from 1.45. The change B applies to unicode builds of DBD::ODBC (the default on Windows but you can build it for unicode on unix too) and char/varchar columns and not nchar/nvarchar columns. Prior to this release of DBD::ODBC when you are using the unicode build of DBD::ODBC and inserted data into a CHAR/VARCHAR columns using parameters DBD::ODBC did this: 1 if you set odbc_describe_parameters to 0, (thus preventing DBD::ODBC from calling SQLDescribeParam) parameters for CHAR/VARCHAR columns were bound as SQL_WVARCHAR or SQL_WLONGVARCHAR (depending on the length of the parameter). 2 if you set odbc_force_bind_type then all parameters are bound as you specified. 3 if you override the parameter type in the bind_param method, the type you specified would be used. 4 if the driver does not support SQLDescribeParam or SQLDescribeParam was called and failed then the bind type defaulted as in 1. 5 if none of the above (and I'd guess that is the normal case for most people) then DBD::ODBC calls SQLDescribeParam to find the parameter type. This usually returns SQL_CHAR or SQL_VARCHAR for CHAR/VARCHAR columns unsurprisingly. The parameter was then bound as SQL_VARCHAR. Items 1 to 4 still apply. 5 now has a different behaviour. 
In this release, DBD::ODBC now looks at your bound data first before using the type returned by SQLDescribeParam. If you data looks like unicode (i.e., SvUTF8() is true) it now binds the parameter as SQL_WVARCHAR. What might this might mean to you? If you had Perl scalars that were bound to CHAR/VARCHAR columns in an insert/update/delete and those scalars contained unicode, DBD::ODBC would actually pass the individual octets in your scalar not characters. For instance, if you had the Perl scalar "\x{20ac}" (the Euro unicode character) and you bound it to a CHAR/VARCHAR, DBD::ODBC would pass 0xe2, 0x82, 0xc2 as separate characters because those bytes were Perl's UTF-8 encoding of a euro. These would probably be interpreted by your database engine as 3 characters in its current codepage. If you queried your database to find the length of the data inserted you'd probably get back 3, not 1. However, when DBD::ODBC read that column back in a select statement, it would bind the column as SQL_WCHAR and you'd get back 3 characters with the utf8 flag on (what those characters were depends on how your database or driver translates code page characters to wide characters). What should happen now is that if your bound parameters are unicode, DBD::ODBC will bind them as wide characters (unicode) and your driver or database will attempt to convert them into the code page it is using. This means so long as your database can store the data you are inserting, when you read it back you should get what you inserted. =head1 SYNOPSIS use DBI; $dbh = DBI->connect('dbi:ODBC:DSN=mydsn', 'user', 'password'); See L for more information. =head1 DESCRIPTION =head2 Change log and FAQs Please note that the change log has been moved to DBD::ODBC::Changes. To access this documentation, use C. The FAQs have also moved to DBD::ODBC::FAQ.pm. To access the FAQs use C. =head2 Important note about the tests DBD::ODBC is unlike most other DBDs in that it connects to literally dozens of possible ODBC Drivers. It is practically impossible for me to test every one and so some tests may fail with some ODBC Drivers. This does not mean DBD::ODBC will not work with your ODBC Driver but it is worth reporting any test failures on rt.cpan.org or to the dbi-users mailing list. =head2 DBI attribute handling If a DBI defined attribute is not mentioned here it behaves as per the DBI specification. =head3 ReadOnly (boolean) DBI documents the C attribute as being settable and retrievable on connection and statement handles. In ODBC setting ReadOnly to true causes the connection attribute C to be set to C and setting it to false will set the access mode to C (which is the default in ODBC). B There is no equivalent of setting ReadOnly on a statement handle in ODBC. B See ODBC documentation on C as setting it to C does B prevent your script from running updates or deletes; it is simply a hint to the driver/database that you won't being doing updates. B Since DBD::ODCB 1.44_3, if the driver does not support setting C and returns SQL_SUCCESS_WITH_INFO and "option value changed" a warning is issued (which you'll only see if you have DBI > 1.628). In addition, any subsequent attempts to fetch the ReadOnly attribute will return the value last set. This attribute requires DBI version 1.55 or better. =head2 Private attributes common to connection and statement handles =head3 odbc_ignore_named_placeholders Use this if you have special needs (such as Oracle triggers, etc) where :new or :name mean something special and are not just place holder names. 
You B then use ? for binding parameters. Example: $dbh->{odbc_ignore_named_placeholders} = 1; $dbh->do("create trigger foo as if :new.x <> :old.x then ... etc"); Without this, DBD::ODBC will think :new and :old are placeholders for binding and get confused. =head3 odbc_default_bind_type This value defaults to 0. Older versions of DBD::ODBC assumed that the parameter binding type was 12 (C). Newer versions always attempt to call C to find the parameter types but if C is unavailable DBD::ODBC falls back to a default bind type. The internal default bind type is C (for non-unicode build) and C or C (for a unicode build depending on whether the parameter is unicode or not). If you set C to a value other than 0 you override the internal default. B If you call the C method with a SQL type this overrides everything else above. =head3 odbc_force_bind_type This value defaults to 0. If set to anything other than 0 this will force bound parameters to be bound as this type and C will not be used; in other words it implies L is set to false too. Older versions of DBD::ODBC assumed the parameter binding type was 12 (C) and newer versions always attempt to call C to find the parameter types. If your driver supports C and it succeeds it may still fail to describe the parameters accurately (MS SQL Server sometimes does this with some SQL like I to retrieve the last insert ID. See http://support.microsoft.com/kb/815629. Information provided by Robert Freimuth. =head3 Comments in SQL DBI does not say anything in particular about comments in SQL. DBD::ODBC looks for placeholders in the SQL string and until 1.24_2 it did not recognise comments in SQL strings so could find what it believes to be a placeholder in a comment e.g., select '1' /* placeholder ? in comment */ select -- named placeholder :named in comment '1' I cannot be exact about support for ignoring placeholders in literals but it has existed for a long time in DBD::ODBC. Support for ignoring placeholders in comments was added in 1.24_2. If you find a case where a named placeholder is not ignored and should be, see L for a workaround and mail me an example along with your ODBC driver name. =head3 do This is not really a deviation from the DBI specification since DBI allows a driver to avoid the overhead of creating an DBI statement handle for do(). DBD::ODBC implements C by calling SQLExecDirect in ODBC and not SQLPrepare followed by SQLExecute so C is not the same as: $dbh->prepare($sql)->execute() It does this to avoid a round-trip to the server so it is faster. Normally this is good but some people fall foul of this with MS SQL Server if they call a procedure which outputs print statements (e.g., backup) as the procedure may not complete. See the DBD::ODBC FAQ and in general you are better to use prepare/execute when calling procedures. In addition, you should realise that since DBD::ODBC does not create a DBI statement for do calls, if you set up an error handler the handle passed in when a do fails will be the database handle and not a statement handle. =head3 Mixed placeholder types There are 3 conventions for place holders in DBI. These are '?', ':N' and ':name' (where 'N' is a number and 'name' is an alpha numeric string not beginning with a number). DBD::ODBC supports all these methods for naming placeholders but you must only use one method throughout a particular SQL string. 
If you mix placeholder methods you will get an error like: Can't mix placeholder styles (1/2) =head3 Using the same placeholder more than once DBD::ODBC does not support (currently) the use of one named placeholder more than once in a single SQL string. i.e., insert into foo values (:bar, :p1, :p2, :bar); is not supported because 'bar' is used more than once but: insert into foo values(:bar, :p1, :p2) is ok. If you do the former you will get an error like: DBD::ODBC does not yet support binding a named parameter more than once =head3 Binding named placeholders Although the DBI documentation (as of 1.604) does not say how named parameters are bound Tim Bunce has said that in Oracle they are bound with the leading ':' as part of the name and that has always been the case. i.e., prepare("insert into mytable values (:fred)"); bind_param(":foo", 1); DBD::ODBC does not support binding named parameters with the ':' introducer. In the above example you must use: bind_param("foo", 1); In discussion on the dbi-dev list is was suggested that the ':' could be made optional and there were no basic objections but it has not made it's way into the pod yet. =head3 Sticky Parameter Types The DBI specification post 1.608 says in bind_param: The data type is 'sticky' in that bind values passed to execute() are bound with the data type specified by earlier bind_param() calls, if any. Portable applications should not rely on being able to change the data type after the first bind_param call. DBD::ODBC does allow a parameter to be rebound with another data type as ODBC inherently allows this. Therefore you can do: # parameter 1 set as a SQL_LONGVARCHAR $sth->bind_param(1, $data, DBI::SQL_LONGVARCHAR); # without the bind above the $data parameter would be either a DBD::ODBC # internal default or whatever the ODBC driver said it was but because # parameter types are sticky, the type is still SQL_LONGVARCHAR. $sth->execute($data); # change the bound type to SQL_VARCHAR # some DBDs will ignore the type in the following, DBD::ODBC does not $sth->bind_param(1, $data, DBI::SQL_VARCHAR); =head3 disconnect and transactions DBI does not define whether a driver commits or rolls back any outstanding transaction when disconnect is called. As such DBD::ODBC cannot deviate from the specification but you should know it rolls back an uncommitted transaction when disconnect is called if SQLDisconnect returns state 25000 (transaction in progress). =head3 execute_for_fetch and execute_array From version 1.34_1 DBD::ODBC implements its own execute_for_fetch which binds arrays of parameters and can send multiple rows (L) of parameters through the ODBC driver in one go (this overrides DBI's default execute_for_fetch). This is much faster when inserting, updating or deleting many rows in one go. Note, execute_array uses execute_for_fetch when the parameters are passed for column-wise binding. NOTE: DBD::ODBC 1.34_1 to DBD::ODBC 1.36_1 set the default to use DBD::ODBC's own execute_for_fetch but quite a few ODBC drivers just cannot handle it. As such, from DBD::ODBC 1.36_2 the default was changed to not use DBD::ODBC's execute_for_fetch (i.e., you need to enable it with L). NOTE: Some ODBC drivers don't support setting SQL_ATTR_PARAMSET_SIZE > 1, and hence cannot support binding arrays of parameters. The only way to detect this is to attempt to set SQL_ATTR_PARAMSET_SIZE to a value greater than 1 and it is too late once someone has called execute_for_fetch. 
I don't want to add test code on each connect to test for this as it will affect everyone, even those not using the native execute_for_fetch so for now it is a suck it and see. For your information MS Access which does not support arrays of parameters errors with HY092, "Invalid attribute/option identifier". However, there are a small number of differences between using DBD::ODBC's execute_for_fetch compared with using DBI's default implementation (which simply calls execute repeatedly once per row). The differences you may see are: o as DBI's execute_for_fetch does one row at a time the result from execute is for one row and just about all ODBC drivers can report the number of affected rows when SQLRowCount is called per execute. When batches of parameters are sent the driver can still return the number of affected rows but it is usually per batch rather than per row. As a result, the tuple_status array you may pass to execute_for_fetch (or execute_array) usually shows -1 (unknown) for each row although the total affected returned in array context is a correct total affected. o not all ODBC drivers have sufficient ODBC support (arguably a bug) for correct diagnostics support when using arrays. DBI dictates that if a row in the batch is in error the tuple_status will contain the state, native and error message text. However the batch may generate multiple errors per row (which DBI says nothing about) and more than one row may error. In ODBC we get a list of errors but to associate each one with a particular row we need to call SQLGetDiagField for SQL_DIAG_ROW_NUMBER and it should say which row in the batch the diagnostic is associated with. Some ODBC drivers do not support SQL_DIAG_ROW_NUMBER properly and then DBD::ODBC cannot know which row in the batch an error refers to. In this case DBD::ODBC will report an error saying "failed to retrieve diags", state of HY000 and a native of 1 so you'll still see an error but not necessarily the exact one. Also, when more than one diagnostic is found for a row DBD::ODBC picks the first one (which is usually most relevant) as there is no way to report more than one diagnostic per row in the tuple_status. If the first problem of SQL_DIAG_ROW_NUMBER proves to be a problem for you the DBD::ODBC tracing will show all errors and you can also use L yourself. o Binding parameters with execute_array and execute_for_fetch does not allow the parameter types to be set. However, as parameter types are sticky you can call bind_param(param_num, undef, {TYPE => sql_type}) before calling execute_for_fetch/execute_array and the TYPE should be sticky when the batch of parameters is bound. o Although you can insert very large columns execute_for_fetch will need L * max length of parameter per parameter so you may hit memory limits. If you use DBI's execute_for_fetch DBD::ODBC uses the ODBC API SQLPutData (see L) which does not require large amounts of memory as large columns are sent in pieces. o A lot of drivers have bugs with arrays of parameters (see the ODBC FAQ). e.g., as of 18-MAR-2012 I've seen the latest SQLite ODBC driver seg fault and freeTDS 8/0.91 returns the wrong row count for batches. o B attempt to do an insert/update/delete and a select in the same SQL with execute_array e.g., SET IDENTITY_INSERT mytable ON insert into mytable (id, name) values (?,?) SET IDENTITY_INSERT mytable OFF SELECT SCOPE_IDENTITY() It just won't/can't work although you may not have noticed when using DBI's inbuilt execute_* methods. See rt 75687. 
=head3 type_info_all Many ODBC drivers now return 20 columns in type_info_all rather than the 19 DBI documents. The 20th column is usually called "USERTYPE". Recent MS SQL Server ODBC drivers do this. Fortunately this should not adversely affect you so long as you are using the keys provided at the start of type_info_all. =head3 Binding Columns The DBI specification allows a column type to be overridden in the call to the bind_col method. Mostly, DBD::ODBC ignores this type as it binds integers (SQL_INTEGER) as a SQL_C_LONG (since DBD::ODBC 1.38_1) and all other columns as SQL_C_CHAR or SQL_C_WCHAR and it is too late to change the bind type after the result-set has been described anyway. The only time when the TYPE passed to bind_col is used in DBD::ODBC is when it is SQL_NUMERIC or SQL_DOUBLE in which case DBD::ODBC will call DBI's sql_type_cast method. Since DBD::ODBC 1.38_1 if you attempt to change the bind type after the column has already bound DBD::ODBC will issue a warning and ignore your column type change e.g., my $s = $h->prepare(q/select a from mytable); $s->execute; # The column type was determined here my $r; $s->bind_col(1, \$r); # and bound as the right type here $s->execute; $s->bind_col(1, \$r, {TYPE => SQL_XXX}); # warning, type changed Basically, if you are passing a TYPE to bind_col with DBD::ODBC (other than SQL_NUMERIC or SQL_DOUBLE) your code is probably wrong. Significant changes occurred in DBD::ODBC at 1.38_1 for binding columns. Please see the Changes file. =head3 bind_param DBD::ODBC follows the DBI specification for bind_param however the third argument (a type or a hashref containing a type) is loosely defined by DBI. From the DBI pod: I As a general rule, don't specify a type when calling bind_param. If you stick to inserting appropriate data into the appropriate column DBD::ODBC will mostly do the right thing especially if the ODBC driver supports SQLDescribeParam. In particular don't just add a type of SQL_DATE because you are inserting a date (it will not work). The correct syntax in ODBC for inserting dates, times and timestamps is: insert into mytable (mydate, mttime, mytimestamp) values(?,?,?); bind_param(1, "{d 'YYYY-MM-DD'}"); bind_param(2, "{t 'HH:MM:SS.MM'}"); # :MM can be omitted and some dbs support :MMM bind_param(3, "{ts 'YYYY-MM-DD HH:MM:SS'}"); See http://technet.microsoft.com/en-US/library/ms190234%28v=SQL.90%29.aspx The only times when you might want to add a type are: 1. If your ODBC driver does not support SQLDescribeParam (or if you told DBD::ODBC not to use it) then DBD::ODBC will default to inserting each parameter as a string (which is usually the right thing anyway). This is ok, most of the time, but is probably not what you want when inserting a binary (use TYPE => SQL_BINARY). 2. If for some reason your driver describes the parameter incorrectly. It is difficult to describe an example of this. 3. If SQLDescribeParam is supported but fails e.g., MS SQL Server has problems with SQL like "select myfunc(?) where 1 = 1". Also, DBI exports some types which are not available in ODBC e.g., SQL_BLOB. If you are unsure about ODBC types look at your ODBC header files or look up valid types in the ODBC specification. =head3 tables and table_info These are not really deviations from the DBI specification but a clarification of a DBI optional feature. DBD::ODBC supports wildcards (% and _) in the catalog, schema and type arguments. 
However, you should be aware that if the statement attribute SQL_ATTR_METADATA_ID is SQL_TRUE the values are interpreted as identifiers and the case is ignored. SQL_ATTR_METADATA_ID defaults to SQL_FALSE so normally the values are treated as patterns and the case is significant. SQLGetInfo for SQL_ACCESSIBLE_TABLES can affect what tables you can list. All the special cases listed by DBI (empty strings for all arguments but one which is '%') for catalog, schema and table type are supported by DBD::ODBC. However, using '%' for table type in a call to the tables method does not currently work with DBI up to 1.631 due to an issue in DBI. Although DBD::ODBC supports all the usage cases listed by DBI, your ODBC driver may not. =head2 Unicode The ODBC specification supports wide character versions (a postfix of 'W') of some of the normal ODBC APIs e.g., SQLDriverConnectW is a wide character version of SQLDriverConnect. In ODBC on Windows the wide characters are defined as SQLWCHARs (2 bytes) and are UCS-2 (but UTF-16 is accepted by some drivers now e.g., MS SQL Server 2012 and the new collation suffix _SC which stands for Supplementary Character Support). On non-Windows, the main driver managers I know of have implemented the wide character APIs differently: =over =item unixODBC unixODBC mimics the Windows ODBC API precisely meaning the wide character versions expect and return 2-byte characters in UCS-2 or UTF-16. unixODBC will happily recognise ODBC drivers which only have the ANSI versions of the ODBC API and those that have the wide versions too. unixODBC will allow an ANSI application to work with a unicode ODBC driver and vice versa (although in the latter case you obviously cannot actually use unicode). unixODBC does not prevent you sending UTF-8 in the ANSI versions of the ODBC APIs but whether that is understood by your ODBC driver is another matter. unixODBC differs in only one way from the Microsoft ODBC driver in terms of unicode support in that it avoids unnecessary translations between single byte and double byte characters when an ANSI application is using a unicode-aware ODBC driver by requiring unicode applications to signal their intent by calling SQLDriverConnectW first. On Windows, the ODBC driver manager always uses the wide versions of the ODBC API in ODBC drivers which provide the wide versions regardless of what the application really needs and this results in a lot of unnecessary character translations when you have an ANSI application and a unicode ODBC driver. =item iODBC The wide character versions expect and return wchar_t types. =back DBD::ODBC has gone with unixODBC so you cannot use iODBC with a unicode build of DBD::ODBC. However, some ODBC drivers support UTF-8 (although how they do this with SQLGetData reliably I don't know) and so you should be able to use those with DBD::ODBC not built for unicode. =head3 Enabling and Disabling Unicode support On Windows Unicode support is enabled by default and to disable it you will need to specify C<-nou> to F to get back to the original behavior of DBD::ODBC before any Unicode support was added. e.g., perl Makfile.PL -nou On non-Windows platforms Unicode support is disabled by default. To enable it specify C<-u> to F when you configure DBD::ODBC. e.g., perl Makefile.PL -u =head3 Unicode - What is supported? As of version 1.17 DBD::ODBC has the following unicode support: =over =item SQL (introduced in 1.16_2) Unicode strings in calls to the C and C methods are supported so long as the C attribute is not used. 
=item unicode connection strings (introduced in 1.16_2) Unicode connection strings are supported but you will need a DBI post 1.607 for that. =item column names Unicode column names are returned. =item bound columns (introduced in 1.15) If the DBMS reports the column as being a wide character (SQL_Wxxx) it will be bound as a wide character and any returned data will be converted from UTF-16 to UTF-8 and the UTF-8 flag will then be set on the data. =item bound parameters If the perl scalars you bind to parameters are marked UTF-8 and the DBMS reports the type as being a wide type or you bind the parameter as a wide type they will be converted to wide characters and bound as such. =item metadata calls like table_info, column_info As of DBD::ODBC 1.32_3 meta data calls accept Unicode strings. =back Since version 1.16_4, the default parameter bind type is SQL_WVARCHAR for unicode builds of DBD::ODBC. This only affects ODBC drivers which do not support SQLDescribeParam and only then if you do not specifically set a SQL type on the bind_param method call. The above Unicode support has been tested with the SQL Server, Oracle 9.2+ and Postgres drivers on Windows and various Easysoft ODBC drivers on UNIX. =head3 Unicode - What is not supported? You cannot use unicode parameter names e.g., select * from table where column = :unicode_param_name You cannot use unicode strings in calls to prepare if you set the odbc_execdirect attribute. You cannot use the iODBC driver manager with DBD::ODBC built for unicode. =head3 Unicode - Caveats For Unicode support on any platform in Perl you will need at least Perl 5.8.1 - sorry but this is the way it is with Perl. The Unicode support in DBD::ODBC expects a WCHAR to be 2 bytes (as it is on Windows and as the ODBC specification suggests it is). Until ODBC specifies any other Unicode support it is not envisioned this will change. On UNIX there are a few different ODBC driver managers. I have only tested the unixODBC driver manager (http://www.unixodbc.org) with Unicode support and it was built with defaults which set WCHAR as 2 bytes. I believe that the iODBC driver manager expects wide characters to be wchar_t types (which are usually 4 bytes) and hence DBD::ODBC will not work iODBC when built for unicode. The ODBC Driver must expect Unicode data specified in SQLBindParameter and SQLBindCol to be UTF-16 in local endianness. Similarly, in calls to SQLPrepareW, SQLDescribeColW and SQLDriverConnectW. You should be aware that once Unicode support is enabled it affects a number of DBI methods (some of which you might not expect). For instance, when listing tables, columns etc some drivers (e.g. Microsoft SQL Server) will report the column types as wide types even if the strings actually fit in 7-bit ASCII. As a result, there is an overhead for retrieving this column data as 2 bytes per character will be transmitted (compared with 1 when Unicode support is not enabled) and these strings will be converted into UTF-8 but will end up fitting (in most cases) into 7bit ASCII so a lot of conversion work has been performed for nothing. If you don't have Unicode table and column names or Unicode column data in your tables you are best disabling Unicode support. I am at present unsure if ChopBlanks processing on Unicode strings is working correctly on UNIX. If nothing else the construct L' ' in dbdimp.c might not work with all UNIX compilers. Reports of issues and patches welcome. 
=head3 Unicode implementation in DBD::ODBC DBD::ODBC uses the wide character versions of the ODBC API and the SQL_WCHAR ODBC type to support unicode in Perl. Wide characters returned from the ODBC driver will be converted to UTF-8 and the perl scalars will have the utf8 flag set (by using sv_utf8_decode). B Perl scalars which are UTF-8 and are sent through the ODBC API will be converted to UTF-16 and passed to the ODBC wide APIs or signalled as SQL_WCHARs (e.g., in the case of bound columns). Retrieved data which are wide characters are converted from UTF-16 to UTF-8. However, you should realise most ODBC drivers do not support UTF-16, ODBC only talks about wide characters being 2 bytes and UCS-2 and UCS-2 and UTF-16 are not the same. UCS-2 only supports Unicode characters in the first plane (the Basic Multilangual Plane or BMP) (code points U+0000 to U+FFFF), the most frequently used characters. So why does DBD::ODBC currently encode in UTF-16? For around 97% of Unicode characters in the range 0-0xFFFF UCS-2 and UTF-16 are exactly the same (and where they differ there is no valid Unicode character as the range U+D800 to U+DFFF is reserved from use only as surrogate pairs). As the ODBC API currently uses UCS-2 it does not support Unicode characters with code points above 0xFFFF (if you know better I'd like to hear from you). However, because DBD::ODBC uses UTF-16 encoding you can still insert Unicode characters above 0xFFFF into your database and retrieve them back correctly but they may not being treated as a single Unicode character in your database e.g., a "select length(a_column) from table" with a single Unicode character above 0xFFFF may return 2 and not 1 so you cannot use database functions on that data like upper/lower/length etc but you can at least save the data in your database and get it back. When built for unicode, DBD::ODBC will always call SQLDriverConnectW (and not SQLDriverConnect) even if a) your connection string is not unicode b) you have not got a DBI later than 1.607, because unixODBC requires SQLDriverConnectW to be called if you want to call other unicode ODBC APIs later. As a result, if you build for unicode and pass ASCII strings to the connect method they will be converted to UTF-16 and passed to SQLDriverConnectW. This should make no real difference to perl not using unicode connection strings. You will need a DBI later than 1.607 to support unicode connection strings because until post 1.607 there was no way for DBI to pass unicode strings to the DBD. =head3 Unicode and Oracle You have to set the environment variables C and C (or any other language setting ending with C<.AL32UTF8>) before loading DBD::ODBC to make Oracle return Unicode data. (See also "Oracle and Unicode" in the POD of DBD::Oracle.) On Windows, using the Oracle ODBC Driver you have to enable the B Workaround in the data source configuration to make Oracle return Unicode to a non-Unicode application. Alternatively, you can include C in your connect string. Unless you need to use ODBC, if you want Unicode support with Oracle you are better off using L. =head3 Unicode and PostgreSQL See the odbc_utf8_on parameter to treat all strings as utf8. Some tests from the original DBD::ODBC 1.13 fail with PostgreSQL 8.0.3, so you may not want to use DBD::ODBC to connect to PostgreSQL 8.0.3. Unicode tests fail because PostgreSQL seems not to give any hints about Unicode, so all data is treated as non-Unicode. 
Unless you need to use ODBC, if you want Unicode support with Postgres you are better off with L as it has a specific attribute named C to enable Unicode support. =head3 Unicode and Easysoft ODBC Drivers We have tested the Easysoft SQL Server, Oracle and ODBC Bridge drivers with DBD::ODBC built for Unicode. All work as described without modification except for the Oracle driver you will need to set you NLS_LANG as mentioned above. =head3 Unicode and other ODBC drivers If you have a unicode-enabled ODBC driver and it works with DBD::ODBC let me know and I will include it here. =head2 ODBC Support in ODBC Drivers =head3 Drivers without SQLDescribeParam Some drivers do not support the C ODBC API (e.g., Microsoft Access, FreeTDS). DBD::ODBC uses the C API when parameters are bound to your SQL to find the types of the parameters. If the ODBC driver does not support C, DBD::ODBC assumes the parameters are C or C types (depending on whether DBD::ODBC is built for unicode or not and whether your parameter is unicode data). In any case, if you bind a parameter and specify a SQL type this overrides any type DBD::ODBC would choose. For ODBC drivers which do not support C the default behavior in DBD::ODBC may not be what you want. To change the default parameter bind type set L. If, after that you have some SQL where you need to vary the parameter types used add the SQL type to the end of the C method. use DBI qw(:sql_types); $h = DBI->connect; # set the default bound parameter type $h->{odbc_default_bind_type} = SQL_VARCHAR; # bind a parameter with a specific type $s = $h->prepare(q/insert into mytable values(?)/); $s->bind_param(1, "\x{263a}", SQL_WVARCHAR); =head2 MS SQL Server Query Notification Query notifications were introduced in SQL Server 2005 and SQL Server Native Client. Query notifications allow applications to be notified when data has changed. DBD::ODBC supports query notification with MS SQL Server using the additional prepare attributes odbc_qn_msgtxt, odbc_qn_options and odbc_qn_timeout. When you pass suitable values for these attributes to the prepare method, DBD::ODBC will make the appropriate SQLSetStmtAttr calls after the statement has been allocated. It is beyond the scope of this document to provide a tutorial on doing this but here are some notes that might help you get started. On SQL Server create database MyDatabase ALTER DATABASE MyDatabase SET ENABLE_BROKER use MyDatabase CREATE TABLE QNtest (a int NOT NULL PRIMARY KEY, b nchar(5) NOT NULL, c datetime NOT NULL) INSERT QNtest (a, b, c) SELECT 1, 'ALFKI', '19991212' CREATE QUEUE myQueue CREATE SERVICE myService ON QUEUE myQueue See L You need to set these SQL Server permissions unless the subscriber is a sysadmin: GRANT RECEIVE ON QueryNotificationErrorsQueue TO "" GRANT SUBSCRIBE QUERY NOTIFICATIONS TO "" To subscribe to query notification for this example: # Prepare the statement. # This is the SQL you want to know if the result changes later my $sth = $dbh->prepare(q/SELECT a, b, c FROM dbo.QNtest WHERE a = 1/, {odbc_qn_msgtxt => 'Message text', odbc_qn_options => 'service=myService', odbc_qn_timeout=> 430000}); # Fetch and display the result set value. while ( my @row = $sth->fetchrow_array ) { print "@row\n"; } # select * from sys.dm_qn_subscriptions will return a record now you are subscribed To wait for notification: # Avoid "String data, right truncation" error when retrieving # the message. 
$dbh->{LongReadLen} = 800; # This query generates a result telling you which query has changed # It will block until the timeout or the query changes my $sth = $dbh->prepare(q/WAITFOR (RECEIVE * FROM MyQueue)/); $sth->execute(); # in the mean time someone does UPDATE dbo.QNtest SET c = '19981212' WHERE a = 1 # Fetch and display the result set value. while ( my @row = $sth->fetchrow_array ) { print "@row\n"; } # You now need to understand the result and look to decide which query has changed =head2 Version Control DBD::ODBC source code was under version control at svn.perl.org until April 2013 when svn.perl.org was closed down and it is now on github at https://github.com/perl5-dbi/DBD-ODBC.git. =head2 Contributing There are a number of ways you may help with the development and maintenance of this module: =over =item Submitting patches Please send me a git pull request or email a unified diff. Please try and include a test which demonstrates the fix/change working. =item Reporting installs Install CPAN::Reporter and report you installations. This is easy to do - see L. =item Report bugs If you find what you believe is a bug then enter it into the L system. Where possible include code which reproduces the problem including any schema required and the versions of software you are using. If you are unsure whether you have found a bug report it anyway or post it to the dbi-users mailing list. =item pod comments and corrections If you find inaccuracies in the DBD::ODBC pod or have a comment which you think should be added then go to L and submit them there. I get an email for every comment added and will review each one and apply any changes to the documentation. =item Review DBD::ODBC Add your review of DBD::ODBC on L. If you are a member on ohloh then add your review or register your use of DBD::ODBC at L. =item submit test cases Most DBDs are built against a single client library for the database. Unlike other DBDs, DBD::ODBC works with many different ODBC drivers. Although they all should be written with regard to the ODBC specification drivers have bugs and in some places the specification is open to interpretation. As a result, when changes are applied to DBD::ODBC it is very easy to break something in one ODBC driver. What helps enormously to identify problems in the many combinations of DBD::ODBC and ODBC drivers is a large test suite. I would greatly appreciate any test cases and in particular any new test cases for databases other than MS SQL Server. =item Test DBD::ODBC I have a lot of problems deciding when to move a development release to an official release since I get few test reports for development releases. What often happens is I call for testers on various lists, get a few and then get inundated with requests to do an official release. Then I do an official release and loads of rts appear out of nowhere and the cycle starts again. DBD::ODBC by its very nature works with many ODBC Drivers and it is impossible for me to have and test them all (this differs from other DBDs). If you depend on DBD::ODBC you should be interested in new releases and if you send me your email address suggesting you are prepared to be part of the DBD::ODBC testing network I will credit you in the Changes file and perhaps the main DBD::ODBC file. =back =head2 CPAN Testers Reporting Please, please, please (is that enough), consider installing CPAN::Reporter so that when you install perl modules a report of the installation success or failure can be sent to cpan testers. 
In this way module authors 1) get feedback on the fact that a module is being installed 2) get to know if there are any installation problems. Also other people like you may look at the test reports to see how successful they are before choosing the version of a module to install. See this guide on how to get started with sending test reports: L. =head2 Others/todo? Level 2 SQLColumnPrivileges SQLProcedureColumns SQLProcedures SQLTablePrivileges SQLDrivers SQLNativeSql =head2 Random Links These are in need of sorting and annotating. Some are relevant only to ODBC developers. You can find DBD::ODBC on ohloh now at: L If you use ohloh and DBD::ODBC please say you use it and rate it. There is a good search engine for the various Perl DBI lists at the following URLS: L L L L L For Linux/Unix folks, compatible ODBC driver managers can be found at: L (unixODBC source and rpms) L (iODBC driver manager source) For Linux/Unix folks, you can checkout the following for ODBC Drivers and Bridges: L L L L =head2 Some useful tutorials: Debugging Perl DBI: L Enabling ODBC support in Perl with Perl DBI and DBD::ODBC: L Perl DBI/DBD::ODBC Tutorial Part 1 - Drivers, Data Sources and Connection: L Perl DBI/DBD::ODBC Tutorial Part 2 - Introduction to retrieving data from your database: L Perl DBI/DBD::ODBC Tutorial Part 3 - Connecting Perl on UNIX or Linux to Microsoft SQL Server: L Perl DBI - Put Your Data On The Web: L Multiple Active Statements (MAS) and DBD::ODBC L 64-bit ODBC L How do I insert Unicode supplementary characters into SQL Server from Perl? L Some Common Unicode Problems and Solutions using Perl DBD::ODBC and MS SQL Server L and a version possibly kept more up to date: L How do I use SQL Server Query Notifications from Linux and UNIX? L =head2 Frequently Asked Questions Frequently asked questions are now in L. Run C to view them. =head1 CONFIGURATION AND ENVIRONMENT You should consult the documentation for the ODBC Driver Manager you are using. =head1 DEPENDENCIES L L =head1 INCOMPATIBILITIES None known. =head1 BUGS AND LIMITATIONS None known other than the deviations from the DBI specification mentioned above in L. Please report any to me via the CPAN RT system. See L for more details. =head1 AUTHOR Tim Bunce Jeff Urlwin Thomas K. Wenrich Martin J. Evans =head1 LICENSE AND COPYRIGHT This program is free software; you can redistribute it and/or modify it under the same terms as Perl itself. See L. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. Portions of this software are Copyright Tim Bunce, Thomas K. Wenrich, Jeff Urlwin and Martin J. Evans - see the source. =head1 SEE ALSO L DBD::ODBC can be used with many ODBC drivers to many different databases. If you want a generic DBD for multiple databases DBD::ODBC is probably for you. If you are only accessing a single database then you might want to look for DBD::my_database (e.g. DBD::Oracle) as database specific DBDs often have more functionality. L or L for logging DBI method calls, SQL, parameters and results. 
=cut DBD-ODBC-1.61/MANIFEST0000644000175000017500000000504312456435505013251 0ustar martinmartinChanges TO_DO ConvertUTF.c ConvertUTF.h dbdimp.c dbdimp.h dbivport.h fixup_c.h fixup_t.h FAQ Makefile.PL MANIFEST This list of files META.yml test_results.txt examples/backup_restore.pl examples/big_result.pl examples/cancel_big_fetch.pl examples/column_info.pl examples/DbiTest.pl examples/DbiTest2.pl examples/dml_counts.pl examples/enable_odbc_tracing.pl examples/execute_for_fetch.pl examples/getinfo.pl examples/identity.pl examples/joetest7.pl examples/leakcheck.pl examples/listtabs.pl examples/lob_read.pl examples/longbin.pl examples/money_test.cgi examples/moreresults.pl examples/multiple_active_stmts.pl examples/northwind.pl examples/odbc_diag.pl examples/params_in_error.pl examples/driver_complete.pl examples/proctest1.pl examples/proctest2.pl examples/proctest3.pl examples/raiserror.pl examples/randombind.pl examples/set_nocount_on.pl examples/sqltmptabs.pl examples/temp_table.pl examples/Test_Dates_Jun19.pl examples/testclob.pl examples/testconn.pl examples/testconnspeed.pl examples/testcrtable.pl examples/testdatasources.pl examples/testdestruction.pl examples/testdisc.pl examples/testerrhandler.pl examples/testfunc.pl examples/testgetinfo.pl examples/testigparams.pl examples/testinout.pl examples/testkeys.pl examples/testmoney.pl examples/testmulti.pl examples/testPrc.sql examples/testproc.pl examples/testproc2.pl examples/testproc3.pl examples/testproc4.pl examples/testspmulti.pl examples/testundef.pl examples/testundef2.pl examples/testundef3.pl examples/testver.pl examples/testxml.pl examples/thrtest.pl examples/timetest.pl examples/unicode_params.pl examples/unicode_sql.pl examples/sqlserver_supplementary_chrs.pl examples/perl-DBD-ODBC.spec ODBC.h ODBC.pm ODBC.xs README README.adabas README.af README.hpux README.informix README.RH9 README.unicode README.windows README.osx README.sqlserver t/01base.t t/02simple.t t/03dbatt.t t/05meth.t t/07bind.t t/08bind2.t t/09multi.t t/10handler.t t/12blob.t t/20SqlServer.t t/30Oracle.t t/40UnicodeRoundTrip.t t/41Unicode.t t/45_unicode_varchar.t t/50_odbc_utf8_on.t t/70execute_array_dbi.t t/70execute_array_native.t t/80_odbc_diags.t t/82_table_info.t t/87_odbc_lob_read.t t/90_trace_flags.t t/ODBCTEST.pm t/ExecuteArray.pm t/UChelp.pm t/pod.t t/pod-coverage.t t/rt_38977.t t/rt_39841.t t/rt_39897.t t/rt_43384.t t/rt_46597.t t/rt_50852.t t/rt_57957.t t/rt_null_nvarchar.t t/rt_59621.t t/rt_61370.t t/rt_62033.t t/rt_63550.t t/rt_78838.t t/rt_79190.t t/rt_79397.t t/rt_81911.t t/rt_101579.t t/sql_type_cast.t t/odbc_describe_parameter.t unicode_helper.c unicode_helper.h if_you_are_taking_over_this_code.txt DBD-ODBC-1.61/ODBC.xs0000644000175000017500000003043113246023362013172 0ustar martinmartin#include "ODBC.h" DBISTATE_DECLARE; MODULE = DBD::ODBC PACKAGE = DBD::ODBC INCLUDE: ODBC.xsi MODULE = DBD::ODBC PACKAGE = DBD::ODBC::dr void _data_sources( drh, attribs=Nullsv ) SV * drh SV * attribs CODE: { AV *ds = dbd_data_sources( drh ); ST(0) = sv_2mortal( newRV_noinc( (SV*)ds ) ); } MODULE = DBD::ODBC PACKAGE = DBD::ODBC::st void odbc_describe_param(sth, param) SV * sth SV * param PPCODE: D_imp_sth(sth); D_imp_xxh(sth); SQLRETURN rc; SQLSMALLINT data_type; SQLULEN size; SQLSMALLINT dd; SQLSMALLINT nullable; rc = SQLDescribeParam(imp_sth->hstmt, SvIV(param), &data_type, &size, &dd, &nullable); if (SQL_SUCCEEDED(rc)) { XPUSHs(sv_2mortal(newSViv(data_type))); XPUSHs(sv_2mortal(newSViv(size))); XPUSHs(sv_2mortal(newSViv(dd))); 
XPUSHs(sv_2mortal(newSViv(nullable))); } else { DBIh_SET_ERR_CHAR( sth, imp_xxh, Nullch, 1, "SQLDescribeParam failed", "IM008", Nullch); } IV odbc_rows(sth) SV * sth PREINIT: IV ret; CODE: ret = odbc_st_rowcount(sth); if (ret == 0) XST_mPV(0, "0E0"); else if (ret < -1) XST_mUNDEF(0); else XST_mIV(0, ret); void odbc_execute_for_fetch(sth, tuples, count, tuple_status) SV * sth SV * tuples IV count SV * tuple_status PREINIT: IV ret; CODE: /*printf("odbc_execute_array\n");*/ ret = odbc_st_execute_for_fetch(sth, tuples, count, tuple_status); if (ret == 0) XST_mPV(0, "0E0"); else if (ret < -1) /* Error */ XST_mUNDEF(0); else XST_mIV(0, ret); void odbc_getdiagrec(sth, record) SV * sth unsigned short record PPCODE: SQLINTEGER native; SQLCHAR state[10]; SQLCHAR msg[256]; SQLRETURN rc; SQLSMALLINT msg_len; D_imp_sth(sth); D_imp_xxh(sth); rc = SQLGetDiagRec(SQL_HANDLE_STMT, imp_sth->hstmt, record, state, &native, msg, sizeof(msg), &msg_len); if (SQL_SUCCEEDED(rc)) { XPUSHs(sv_2mortal(newSVpv(state, 0))); XPUSHs(sv_2mortal(newSViv(native))); XPUSHs(sv_2mortal(newSVpv(msg, 0))); } else if (rc == SQL_NO_DATA) { # no diags found } else { DBIh_SET_ERR_CHAR( sth, imp_xxh, Nullch, 1, "SQLGetDiagField failed", "IM008", Nullch); } void odbc_getdiagfield(sth, record, identifier) SV * sth unsigned short record int identifier PPCODE: SQLCHAR buf[256]; SQLSMALLINT buf_len; SQLLEN len_type = 0; SQLINTEGER int_type = 0; SQLRETURN ret_type = 0; SQLPOINTER info_ptr; SQLRETURN rc; D_imp_sth(sth); D_imp_xxh(sth); switch(identifier) { case SQL_DIAG_CURSOR_ROW_COUNT: case SQL_DIAG_ROW_COUNT: case SQL_DIAG_ROW_NUMBER: { info_ptr = &len_type; break; } case SQL_DIAG_DYNAMIC_FUNCTION_CODE: case SQL_DIAG_NUMBER: case SQL_DIAG_COLUMN_NUMBER: case SQL_DIAG_NATIVE: { info_ptr = &int_type; break; } case SQL_DIAG_RETURNCODE: { info_ptr = &ret_type; break; } default: { info_ptr = buf; break; } } rc = SQLGetDiagField(SQL_HANDLE_STMT, imp_sth->hstmt, record, identifier, info_ptr, sizeof(buf), &buf_len); if (SQL_SUCCEEDED(rc)) { if (info_ptr == &int_type) { XPUSHs(sv_2mortal(newSViv(int_type))); } else if (info_ptr == &len_type) { XPUSHs(sv_2mortal(newSViv(len_type))); } else if (info_ptr == &ret_type) { XPUSHs(sv_2mortal(newSViv(ret_type))); } else { XPUSHs(sv_2mortal(newSVpvn(buf, buf_len))); } } else if (rc == SQL_NO_DATA) { # no diags found } else { DBIh_SET_ERR_CHAR( sth, imp_xxh, Nullch, 1, "SQLGetDiagField failed", "IM008", Nullch); # TO_DO wrong state } SV * odbc_lob_read(sth, colno, bufsv, length, attr = NULL) SV *sth int colno SV *bufsv UV length SV *attr; PROTOTYPE: $$$$;$ PREINIT: char *buf; IV ret_len; IV sql_type = 0; INIT: if (length == 0) { croak("Cannot retrieve 0 length lob"); } CODE: if (attr) { SV **svp; DBD_ATTRIBS_CHECK("odbc_lob_read", sth, attr); DBD_ATTRIB_GET_IV(attr, "TYPE", 4, svp, sql_type); } if (SvROK(bufsv)) { bufsv = SvRV(bufsv); } sv_setpvn(bufsv, "", 0); /* ensure we can grow ok */ buf = SvGROW(bufsv, length + 1); ret_len = odbc_st_lob_read(sth, colno, bufsv, length, sql_type); if (ret_len >= 0) { SvCUR_set(bufsv, ret_len); /* set length in SV */ *SvEND(bufsv) = '\0'; /* NUL terminate */ SvSETMAGIC(bufsv); RETVAL = newSViv(ret_len); } else { XSRETURN_UNDEF; } OUTPUT: RETVAL # ColAttributes was down in the TO_DO list to remove since it duplicated # some functionality in DBI statement attributes. 
However, ColAttributes # can do more void _ColAttributes(sth, colno, ftype) SV * sth int colno int ftype CODE: ST(0) = odbc_col_attributes(sth, colno, ftype); void _Cancel(sth) SV * sth CODE: ST(0) = odbc_cancel(sth); void _tables(dbh, sth, catalog, schema, table, type) SV * dbh SV * sth SV * catalog SV * schema SV * table SV * type CODE: /* list all tables and views (0 as last parameter) */ ST(0) = dbd_st_tables(dbh, sth, catalog, schema, table, type) ? &PL_sv_yes : &PL_sv_no; void _primary_keys(dbh, sth, catalog, schema, table) SV * dbh SV * sth char * catalog char * schema char * table CODE: ST(0) = dbd_st_primary_keys(dbh, sth, catalog, schema, table) ? &PL_sv_yes : &PL_sv_no; void _statistics(dbh, sth, catalog, schema, table, unique, quick) SV * dbh SV * sth char * catalog char * schema char * table int unique int quick CODE: ST(0) = dbd_st_statistics(dbh, sth, catalog, schema, table, unique, quick) ? &PL_sv_yes : &PL_sv_no; #void #DescribeCol(sth, colno) # SV *sth # int colno # # PPCODE: # # char ColumnName[SQL_MAX_COLUMN_NAME_LEN]; # I16 NameLength; # I16 DataType; # U32 ColumnSize; # I16 DecimalDigits; # I16 Nullable; # int rc; # # rc = odbc_describe_col(sth, colno, ColumnName, sizeof(ColumnName), &NameLength, # &DataType, &ColumnSize, &DecimalDigits, &Nullable); # if (rc) { # XPUSHs(newSVpv(ColumnName, 0)); # XPUSHs(newSViv(DataType)); # XPUSHs(newSViv(ColumnSize)); # XPUSHs(newSViv(DecimalDigits)); # XPUSHs(newSViv(Nullable)); # } # ------------------------------------------------------------ # database level interface # ------------------------------------------------------------ MODULE = DBD::ODBC PACKAGE = DBD::ODBC::db void _ExecDirect( dbh, stmt ) SV * dbh SV * stmt CODE: { /*STRLEN lna;*/ /*char *pstmt = SvOK(stmt) ? SvPV(stmt,lna) : "";*/ ST(0) = sv_2mortal(newSViv( dbd_db_execdirect( dbh, stmt ) ) ); } void odbc_getdiagrec(dbh, record) SV * dbh unsigned short record PPCODE: SQLINTEGER native; SQLCHAR state[10]; SQLCHAR msg[256]; SQLRETURN rc; SQLSMALLINT msg_len; D_imp_dbh(dbh); D_imp_xxh(dbh); rc = SQLGetDiagRec(SQL_HANDLE_DBC, imp_dbh->hdbc, record, state, &native, msg, sizeof(msg), &msg_len); if (SQL_SUCCEEDED(rc)) { XPUSHs(sv_2mortal(newSVpv(state, 0))); XPUSHs(sv_2mortal(newSViv(native))); XPUSHs(sv_2mortal(newSVpvn(msg, msg_len))); } else if (rc == SQL_NO_DATA) { # no diags found } else { DBIh_SET_ERR_CHAR( dbh, imp_xxh, Nullch, 1, "SQLGetDiagRec failed", "IM008", Nullch); } void odbc_getdiagfield(dbh, record, identifier) SV * dbh unsigned short record int identifier PPCODE: SQLCHAR buf[256]; SQLSMALLINT buf_len; SQLLEN len_type; SQLINTEGER int_type; SQLRETURN ret_type; SQLPOINTER info_ptr; SQLRETURN rc; D_imp_dbh(dbh); D_imp_xxh(dbh); switch(identifier) { case SQL_DIAG_CURSOR_ROW_COUNT: case SQL_DIAG_ROW_COUNT: case SQL_DIAG_ROW_NUMBER: { info_ptr = &len_type; break; } case SQL_DIAG_DYNAMIC_FUNCTION_CODE: case SQL_DIAG_NUMBER: case SQL_DIAG_COLUMN_NUMBER: case SQL_DIAG_NATIVE: { info_ptr = &int_type; break; } case SQL_DIAG_RETURNCODE: { info_ptr = &ret_type; break; } default: { info_ptr = buf; break; } } rc = SQLGetDiagField(SQL_HANDLE_DBC, imp_dbh->hdbc, record, identifier, info_ptr, sizeof(buf), &buf_len); if (SQL_SUCCEEDED(rc)) { if (info_ptr == &int_type) { XPUSHs(sv_2mortal(newSViv(int_type))); } else if (info_ptr == &len_type) { XPUSHs(sv_2mortal(newSViv(len_type))); } else if (info_ptr == &ret_type) { XPUSHs(sv_2mortal(newSViv(ret_type))); } else { XPUSHs(sv_2mortal(newSVpvn(buf, buf_len))); } } else if (rc == SQL_NO_DATA) { # no diags found } else { 
DBIh_SET_ERR_CHAR( dbh, imp_xxh, Nullch, 1, "SQLGetDiagField failed", "IM008", Nullch); # TO_DO wrong state } # called from column_info void _columns(dbh, sth, catalog, schema, table, column) SV * dbh SV * sth SV * catalog SV * schema SV * table SV * column CODE: ST(0) = odbc_db_columns(dbh, sth, catalog, schema, table, column) ? &PL_sv_yes : &PL_sv_no; void _GetInfo(dbh, ftype) SV * dbh int ftype CODE: ST(0) = odbc_get_info(dbh, ftype); void _GetTypeInfo(dbh, sth, ftype) SV * dbh SV * sth int ftype CODE: ST(0) = odbc_get_type_info(dbh, sth, ftype) ? &PL_sv_yes : &PL_sv_no; void _GetStatistics(dbh, sth, CatalogName, SchemaName, TableName, Unique) SV * dbh SV * sth char * CatalogName char * SchemaName char * TableName int Unique CODE: ST(0) = dbd_st_statistics(dbh, sth, CatalogName, SchemaName, TableName, Unique, 0) ? &PL_sv_yes : &PL_sv_no; void _GetPrimaryKeys(dbh, sth, CatalogName, SchemaName, TableName) SV * dbh SV * sth char * CatalogName char * SchemaName char * TableName CODE: /* the following will end up in dbdimp.c/dbd_st_primary_keys */ ST(0) = odbc_st_primary_keys(dbh, sth, CatalogName, SchemaName, TableName) ? &PL_sv_yes : &PL_sv_no; void _GetSpecialColumns(dbh, sth, Identifier, CatalogName, SchemaName, TableName, Scope, Nullable) SV * dbh SV * sth int Identifier char * CatalogName char * SchemaName char * TableName int Scope int Nullable CODE: ST(0) = odbc_get_special_columns(dbh, sth, Identifier, CatalogName, SchemaName, TableName, Scope, Nullable) ? &PL_sv_yes : &PL_sv_no; void _GetForeignKeys(dbh, sth, PK_CatalogName, PK_SchemaName, PK_TableName, FK_CatalogName, FK_SchemaName, FK_TableName) SV * dbh SV * sth char * PK_CatalogName char * PK_SchemaName char * PK_TableName char * FK_CatalogName char * FK_SchemaName char * FK_TableName CODE: ST(0) = odbc_get_foreign_keys(dbh, sth, PK_CatalogName, PK_SchemaName, PK_TableName, FK_CatalogName, FK_SchemaName, FK_TableName) ? &PL_sv_yes : &PL_sv_no; # # Corresponds to ODBC 2.0. 3.0's SQL_API_ODBC3_ALL_FUNCTIONS is handled also # scheme void GetFunctions(dbh, func) SV * dbh unsigned short func PPCODE: UWORD pfExists[SQL_API_ODBC3_ALL_FUNCTIONS_SIZE]; RETCODE rc; int i; int j; D_imp_dbh(dbh); rc = SQLGetFunctions(imp_dbh->hdbc, func, pfExists); if (SQL_ok(rc)) { switch (func) { case SQL_API_ALL_FUNCTIONS: for (i = 0; i < 100; i++) { XPUSHs(pfExists[i] ? &PL_sv_yes : &PL_sv_no); } break; case SQL_API_ODBC3_ALL_FUNCTIONS: for (i = 0; i < SQL_API_ODBC3_ALL_FUNCTIONS_SIZE; i++) { for (j = 0; j < 8 * sizeof(pfExists[i]); j++) { XPUSHs((pfExists[i] & (1 << j)) ? &PL_sv_yes : &PL_sv_no); } } break; default: XPUSHs(pfExists[0] ? &PL_sv_yes : &PL_sv_no); } } MODULE = DBD::ODBC PACKAGE = DBD::ODBC::db DBD-ODBC-1.61/README.osx0000644000175000017500000000124312254016147013577 0ustar martinmartinA few people have reported the following error when running DBD::ODBC on MAC OS X 10.5 (Leopard): install_driver(ODBC) failed: Can't load '/Library/Perl/5.8.8/darwin-thread-multi-2level/auto/DBD/ODBC/ODBC.bundle' for module DBD::ODBC: dlopen(/Library/Perl/5.8.8/darwin-thread-multi-2level/auto/DBD/ODBC/ODBC.bundle, 1): Symbol not found: ___CFConstantStringClassReference Referenced from: /Library/Perl/5.8.8/darwin-thread-multi-2level/auto/DBD/ODBC/ODBC.bundle Expected in: dynamic lookup A possible workaround for this is to add "-framework CoreFoundation" to the Makefile for the LDLOADLIBS and EXTRALIBS symbols. 
Since DBD::ODBC post 1.16, this has been automated.DBD-ODBC-1.61/META.yml0000644000175000017500000000312113614567611013365 0ustar martinmartin#--- #YAML:1.0 name: DBD-ODBC abstract: ODBC DBD for Perl DBI version: 1.61 version_from: ODBC.pm author: - Martin J. Evans - Tim Bunce - Jeff Urlwin license: perl distribution_type: module requires: DBI: 1.609 perl: 5.008 Test::Simple: 0.90 # # Added build_requires/configure_requires DBI as DBD::ODBC needs DBI::DBD to # get postamble etc # build_requires: DBI: 1.21 ExtUtils::MakeMaker: 0 Test::Simple: 0.90 Config: 0 Data::Dumper: 0 Devel::Peek: 0 B: 0 overload: 0 strict: 0 warnings: 0 utf8: 0 configure_requires: DBI: 1.21 ExtUtils::MakeMaker: 0 recommends: Test::Version: 1.002001 # add provides in 1.54 (initially) but without the version number the distrib won't index # without the provides the indexer extracts the version itself and with provides and # no version it assumes 0 #provides: # DBD::ODBC: # file: ODBC.pm # DBD::ODBC::dr: # file: ODBC.pm # DBD::ODBC::db: # file: ODBC.pm # DBD::ODBC::st: # file: ODBC.pm dynamic_config: 1 resources: homepage: http://search.cpan.org/dist/DBD-ODBC/ license: http://dev.perl.org/licenses/ bugtracker: http://rt.cpan.org/Public/Dist/Display.html?Name=DBD-ODBC MailingList: http://dbi.perl.org/support repository: https://github.com/perl5-dbi/DBD-ODBC.git # this would be x_IRC if using v2 meta-spec IRC: irc://irc.perl.org/#dbi installdirs: site keywords: - ODBC - DBD - DBI meta-spec: version: 1.4 url: http://module-build.sourceforge.net/META-spec-v1.4.html generated_by: Martin J. Evans # ensure there is a newline at the end of this file or pause objects DBD-ODBC-1.61/README.sqlserver0000644000175000017500000000657312254016156015027 0ustar martinmartinIt is proving near impossible to workaround all the different bugs in the various SQL Server ODBC drivers and SQL Server Native Client drivers. From here on I am going to try and keep a record or what worked and what bugs I found. There are 5 basic ways I know to connect to SQL Server via ODBC: 1. On Windows via the SQL Server OBDC Driver 2. On Windows via the SQL Server Native Client 3. On UNIX via an ODBC driver manager (of which there are 3) and a SQL Server ODBC Driver. There are at least 4 of these: o Easysoft SQL Server ODBC Driver o Data Direct SQL Server Driver o Openlink SQL Server Driver o FreeTDS SQL Server Driver 4. On Unix via DBD::Proxy 5. On Unix via an ODBC-ODBC Bridge At least Easysoft and Openlink have these. Each driver manager and driver has it own bugs and other issues and it is exceedingly difficult to keep up with them all. For Windows and the various SQL Server drivers the list of bugs and workarounds is growing to a point making it difficult to keep track. Driver: SQL Server Version reported in ODBC Administrator: 2000.85.1132.00 Driver version: 03.85.1132 Driver name: SQLSRV32.DLL Passes all tests in: 1.18_2 Issues/Notes: 1. does not support MARS_Connection (see 20SqlServer.t) 2. needs special workaround when inserting more than 400K into a varxxx(max) field (see rt_38977.t). The workaround is to set the strlen_or_indptr in SQLBindParameter to SQL_LEN_DATA_AT_EXEC(0) and the buffer_length to 0. Without the workaround the errors produced in rt_38977.t are: The insert/update of a text or image column(s) did not succeed. The text, ntext, or image pointer value conflicts with the column name. 3. Sometimes describes parameters incorrectly. 
Although the workaround in dbdimp.c to set the column_size to the buffer_length works around most sometimes you need to set the parameter type yourself (see rt_39841.t). Driver: SQL Native Client Version reported in ODBC Administrator: 2005.90.1399.00 Driver version: 09.00.1399 Driver Name: SQLNCLI.DLL Passes all tests in: 1.18_2 Issues/Notes: 1. does support MARS_Connection 2. Sometimes describes parameters incorrectly. Although the workaround in dbdimp.c to set the column_size to the buffer_length works around most sometimes you need to set the parameter type yourself (see rt_39841.t). 3. Fails the rt_38977.t test to insert more than 400K into a varxxx(max) field unless column_size is set to 0 in the SQLBindParamter call. Driver: SQL Server Version reported in ODBC Administrator: 6.00.6000.16386 Driver version: 06.00.6000 Driver Name: SQLSRV32.DLL Passes all tests in: 1.18_2 Issues/Notes: Probably has many of the same issues as other SQL Server ODBC drivers but not individually tested. Driver: SQL Server Version reported in ODBC Administrator: 2000.86.1830.00 Driver version: 03.86.1830 Driver Name: SQLSRV32.DLL Passes all tests in: 1.18_2 Issues/Notes: Probably has many of the same issues as other SQL Server ODBC drivers but not individually tested. Driver: SQL Server Version reported in ODBC Administrator: 2000.85.1132.00 Driver version: ?????? Driver Name: SQLSRV32.DLL Passes all tests in: ?? Issues/Notes: There is more than one "driver version" reported for the version the ODBC Administrator reports (2000.85.1132.00) and at least one of them fails rt_38977.t with "Invalid precision value".DBD-ODBC-1.61/README.windows0000644000175000017500000001337312254016170014463 0ustar martinmartinBuilding DBD::ODBC on Windows ============================= Quick hints: 1. Make sure when you run "perl Makefile.PL" that you specify which make you are using (dmake, nmake). You do this with: perl Makefile.PL MAKE=nmake If you don't, ExtUtils::MakeMaker will generate the Makefile for the default make which may not be the one you are going to use and this will lead to make errors. 2. I've had a number of reports that attempting to build with Borland's make does not work. If that happens to you please don't tell me; get nmake or dmake. 3. If you are not interested in Unicode support disable it as Unicode support is built in by default on Windows builds. You can disable it with -nou as in: perl Makefile.PL -nou 4. If you are manually attempting to build DBD::ODBC for use with ActiveState Perl (ActivePerl) don't. You can use cpan like this: cpan M/MJ/MJEVANS/DBD-ODBC-1.23_4.tar.gz If you don't have Microsoft VC on your PATH, then this will download and install MinGW for you (inside the Perl tree) and use that instead. This should work for the latest builds of all major Perl versions, 5.8.9.827, 5.10.1.1007 and 5.12.0.1200. On older versions you may need to install MinGW with PPM "by-hand" first: ppm install MinGW cpan M/MJ/MJEVANS/DBD-ODBC-1.23_4.tar.gz On ActivePerl 5.8.8 and earlier you will have to download, install and configure MinGW and dmake manually though, so you may not want to bother... 5. With Strawberry Perl you can install DBD::ODBC from cpan as normal - see below. 64bit Windows ============= I have built DBD::ODBC on win64 and the tests to SQL Server completed successfully. However, you will need at least version 1.15 if you want to run DBD::ODBC on win64 successfully. 
If you are running a 32bit Perl on a 64bit Windows machine you will need to be aware there are two ODBC administrators and you need to create your DSNs with the right one. The ODBC Administrator you get to from Control Panel, Administrative Tools, Data Sources is the 64bit one and data sources created here will not be visible or useable from 32bit applications. The ODBC administrator you need to use for 32bit applications can be found at X:\windows\syswow64\odbcad32.exe. Strawberry Perl =============== If you attempt to install DBD::ODBC with: cpan -i DBD::ODBC cpan in strawberry perl adds INC to the command line and that overrides what MakeMaker attempts to add to INC. As a result, the build will probably be missing the include path for the DBIXS.h module. See http://rt.cpan.org/Public/Bug/Display.html?id=32811. Since DBD::ODBC 1.17_2 I believe we have a workaround in the Makefile.PL that stops this happening and in later Strawberry Perl versions the problem is fixed. Reported issues and workarounds =============================== Missing SQLLEN/SQLULEN definitions ================================== MS changed the ODBC header files and introduced SQLLEN/SQLULEN types which are required to build on 64 bit platforms so DBD::ODBC now needs them. If you find you are missing definitions for SQLLEN/SQLULEN you need updated header files. See the DBD::ODBC FAQ for where you can find them. Visual Studio 6.0 and SDK Feb 2003 ================================== It has been reported to me that there are problems building DBD::ODBC with VS 6 on some setups (thanks to JHF Remmelzwaal for the following example and workaround). Initially an nmake failed with: C:\Program Files\Microsoft SDK\Include\.\windows.h(157) : fatal error C1083: Cannot open include file: 'excpt.h': No such file or directory This was solved by: set PASTHRU_INC=-I"c:\Program Files\Microsoft SDK\include" -I"c:\Program Files\Microsoft Visual Studio\VC98\include" which led to: ODBC.obj : error LNK2001: unresolved external symbol __fltused dbdimp.obj : error LNK2001: unresolved external symbol __fltused unicode_helper.obj : error LNK2001: unresolved external symbol __fltused ODBC.obj : error LNK2001: unresolved external symbol __imp__sprintf dbdimp.obj : error LNK2001: unresolved external symbol __imp__sprintf ODBC.obj : error LNK2001: unresolved external symbol _strcpy dbdimp.obj : error LNK2001: unresolved external symbol _strcpy dbdimp.obj : error LNK2001: unresolved external symbol __imp__strncmp dbdimp.obj : error LNK2001: unresolved external symbol __imp__toupper dbdimp.obj : error LNK2001: unresolved external symbol __imp__strncpy dbdimp.obj : error LNK2001: unresolved external symbol __imp__strstr dbdimp.obj : error LNK2001: unresolved external symbol _memcpy dbdimp.obj : error LNK2001: unresolved external symbol _memset dbdimp.obj : error LNK2001: unresolved external symbol _strlen unicode_helper.obj : error LNK2001: unresolved external symbol _strlen dbdimp.obj : error LNK2001: unresolved external symbol _strcmp dbdimp.obj : error LNK2001: unresolved external symbol _strcat dbdimp.obj : error LNK2001: unresolved external symbol __imp__atoi dbdimp.obj : error LNK2001: unresolved external symbol _abs dbdimp.obj : error LNK2001: unresolved external symbol __imp___pctype dbdimp.obj : error LNK2001: unresolved external symbol __imp___isctype dbdimp.obj : error LNK2001: unresolved external symbol __imp____mb_cur_max dbdimp.obj : error LNK2001: unresolved external symbol __imp__strchr unicode_helper.obj : error LNK2001: unresolved 
external symbol __imp__wcslen LINK : error LNK2001: unresolved external symbol __DllMainCRTStartup@12 blib\arch\auto\DBD\ODBC\ODBC.dll : fatal error LNK1120: 20 unresolved externals NMAKE : fatal error U1077: 'link' : return code '0x460' which was resolved by adding c:\Program Files\Microsoft Visual Studio\VC98\Lib\MSVCRTD.LIB" to end of LDLOADLIBS in the Makefile.DBD-ODBC-1.61/README0000644000175000017500000001512412254016122012763 0ustar martinmartinDBD::ODBC -- DBD module interfacing the ODBC databases. $Id$ DBD::ODBC is Copyright (C) 1994,1995,1996,1997,1998 Tim Bunce portions Copyright (C) 1997,1998,1999,2000,2001,2002 Jeff Urlwin portions Copyright (C) 1997 Thomas K. Wenrich portions Copyright (C) 2007,2008,2009,2010,2012 Martin J. Evans LICENSE INFORMATION: This module is free software; you can redistribute it and/or modify it under the same terms as Perl 5.10.0. For more details, see the full text of the licenses at , and . Files with different licenses or copyright holders: ConvertUTF.c: Copyright (C) 2001-2004, Unicode, Inc. see ConvertUTF8.c for the license. This program is distributed in the hope that it will be useful, but it is provided “as is” and without any express or implied warranties. PLEASE READ THE ENTIRE README FILE CAREFULLY *BEFORE* BUILDING, TESTING AND INSTALLING this you will need to: Build, test and install Perl 5 (as per DBI specifications/compatibility) It is very important to TEST it and INSTALL it. Build, test and install the DBI module (at least DBI 1.609). It is very important to TEST it and INSTALL it. Remember to *read* the DBD::ODBC.pm POD documentation and the DBD::ODBC::Changes.pm POD documentation. BUILDING: set-up these environment variables: DBI_DSN The dbi data source, e.g. 'dbi:ODBC:YOUR_DSN_HERE' DBI_USER The username to use to connect to the database DBI_PASS The username to use to connect to the database ODBCHOME (Unix only) The dir your driver manager is installed in or specify this via -o argument to Makefile.PL If you want UNICODE support on non-Windows platforms specify -u switch to Makefile.PL. If you don't want UNICODE support on Windows specify the -nou switch to Makefile.PL. On non-Windows platforms all UNICODE features will work correctly with the unixODBC driver manager. If the Makefile.PL finds iODBC and you would prefer to build with unixODBC you can use the -x switch to favor unixODBC. This only happens in rare case where you have binary packages of both installed but you can only have development packages on one installed and DBD::ODBC locates iODBC first. This quite often happens on Ubuntu and other Debian based Linux systems where an incomplete iODBC is often installed. perl Makefile.PL make (or nmake/dmake, if VC++ on Win32) make test (or nmake/dmake, if VC++ on Win32) TESTING make test make test TEST_VERBOSE=1 (if any of the t/* tests fail) make install (if the tests look okay) Note that the tests currently all pass when using the Microsoft SQL server driver up to SQL Server 2008 and many other ODBC drivers on Windows and UNIX (too numerous to mention here). To run an individual test use: prove -vb t/test.t If the tests from t/09bind.t fail, that is an indication of lack of ODBC Level 2 functionality. This does not (necessarily) mean that your installation is broken. It does indicate that your ODBC driver does not support certain level 2 calls (see below). All other tests should pass, however, since I've tested with a limited number of ODBC drivers, I could have something wrong in the test. 
Please notify me if you have an incompatibility in the other tests. (For example, some Paradox ODBC drivers *could* fail, if they don't support long file names, since in Paradox file names are tableName.db.). Please let me know about any incompatibilities you encounter. I will say, though, that I may not be able to reproduce or fix problems without some help, since I can't possibly install all the known ODBC drivers. Please try to "solve" the problem, in addition to discovering it. If any of the tests fail and you are using SQL Server and Windows drivers, ensure you updated to MDAC 2.7 or later. NOTES on ODBC Drivers and Compatibility with DBD-ODBC This version utilizes at least one "Level 2" ODBC call (SQLDescribeParam). This *will* affect compatibility with specific ODBC drivers. If the driver is not level 2 (or does not support the level 2 function(s) required), then full compatibility will not be available. The best thing to do is build DBD::ODBC against an ODBC driver manager like unixODBC (http://www.unixodbc.org) as it will resolve many incompatibilities between ODBC versions for you. IF YOU HAVE PROBLEMS: Do not hand edit the generated Makefile unless you are completely sure you understand the implications! Always try to make changes via the Makefile.PL command line and/or editing the Makefile.PL. You should not need to make any changes. If you do *please* let me know so that I can try to make it automatic in a later release. This software is supported via the dbi-users mailing list. For more information and to keep informed about progress you can join the mailing list via http://dbi.perl.org (if you are unable to use the web you can subscribe by sending a message to dbi-users-subscribe@perl.org. Please post details of any problems (or changes you needed to make) to dbi-users@perl.org. But note... ** IT IS IMPORTANT TO INCLUDE THE FOLLOWING INFORMATION: 1. A complete log of a all steps of the build, e.g.: perl Makefile.PL (do a make realclean first) make make test make test TEST_VERBOSE=1 (if any tests fail) 2. Full details of which software you are using, including: Perl version (the output of perl -V) ODBC Driver and version ODBC Driver manager used and version It is important to check that you are using the latest version before posting. If you're not then I'm *very* likely to simply say "upgrade to the latest". You would do yourself a favour by upgrading beforehand. Please remember that I'm _very_ busy. Try to help yourself first, then try to help me help you by following these guidelines carefully. Regards, Tim, Jeff and Martin. =============================================================================== Other info: DBI 'home page': http://dbi.perl.org perldoc DBI perldoc DBD::ODBC (which will include the DBD::ODBC FAQ list) perldoc DBD::ODBC::Changes perldoc DBD::ODBC::FAQ Win32 ODBC drivers: http://www.microsoft.com/support/products/backoffice/odbc/ Follow "Microsoft ODBC Desktop Database Drivers 3.5 For 32-Bit Programs" and "ODBC Drivers for Win95 Applications". For Access use version "Access ODBC driver 3.40.2111 27/03/96" or later. Also, see the examples in the tests and the mytest directory. They *do* show some of the things you can do with the special DBD::ODBC functions. End.