PaxHeader/Net-Kafka-1.06000755 777777 777777 00000000360 13556003155 020406 xustar00amironovCORPAD\Domain Users000000 000000 17 gid=697783653 18 uid=2117034315 30 mtime=1572341357.063419018 30 ctime=1572341357.063419018 29 atime=1572341357.29578263 48 LIBARCHIVE.creationtime=1572341356.044537512 23 SCHILY.dev=16777220 26 SCHILY.ino=12902637717 19 SCHILY.nlink=18 Net-Kafka-1.06/000755 €~/aK€)—Ue00000000000 13556003155 017107 5ustar00amironovCORPAD\Domain Users000000 000000 Net-Kafka-1.06/PaxHeader/Changes000644 777777 777777 00000000360 13556002733 021754 xustar00amironovCORPAD\Domain Users000000 000000 17 gid=697783653 18 uid=2117034315 30 mtime=1572341211.314195079 30 ctime=1572341356.227787399 30 atime=1572341356.432270198 48 LIBARCHIVE.creationtime=1572341187.228520258 23 SCHILY.dev=16777220 26 SCHILY.ino=12902636792 18 SCHILY.nlink=2 Net-Kafka-1.06/Changes000644 €~/aK€)—Ue00000000505 13556002733 020403 0ustar00amironovCORPAD\Domain Users000000 000000 1.06 Tue Oct 29 2019 - Reworked `produce` and `metadata` methods - README improvements - Makefile improvements based on Metacpan testing results 1.05 Thu Oct 25 2019 - Added META_MERGE info for Metacpan build - Fixed Net::Kafka::Producer close sequence 1.04 Thu Oct 24 2019 First public version of Net::Kafka Net-Kafka-1.06/PaxHeader/MANIFEST000644 777777 777777 00000000346 13556003155 021615 xustar00amironovCORPAD\Domain Users000000 000000 17 gid=697783653 18 uid=2117034315 30 mtime=1572341357.259848068 30 ctime=1572341357.259848068 30 atime=1572341357.268172254 38 LIBARCHIVE.creationtime=1572003359 23 SCHILY.dev=16777220 26 SCHILY.ino=12902637725 18 SCHILY.nlink=1 Net-Kafka-1.06/MANIFEST000644 €~/aK€)—Ue00000001025 13556003155 020236 0ustar00amironovCORPAD\Domain Users000000 000000 .travis.yml Changes Kafka.xs lib/Net/Kafka.pm lib/Net/Kafka/Consumer.pm lib/Net/Kafka/Producer.pm lib/Net/Kafka/Util.pm Makefile.PL MANIFEST This list of files netkafka.c netkafka.h ppport.h README README.md scripts/generate_readme.sh t/00-load.t t/01-producer.t t/02-topic-list.t t/03-consumer.t t/constants.t t/lib/TestConsumer.pm t/lib/TestProducer.pm typemap META.yml Module YAML meta-data (added by MakeMaker) META.json Module JSON meta-data (added by MakeMaker) Net-Kafka-1.06/PaxHeader/netkafka.h000644 777777 777777 00000000300 13554325634 022416 xustar00amironovCORPAD\Domain Users000000 000000 17 gid=697783653 18 uid=2117034315 30 mtime=1571924892.689286481 30 ctime=1572341356.223414349 30 atime=1572341356.961037711 23 SCHILY.dev=16777220 26 SCHILY.ino=12902123332 18 SCHILY.nlink=3 Net-Kafka-1.06/netkafka.h000644 €~/aK€)—Ue00000002207 13554325634 021054 0ustar00amironovCORPAD\Domain Users000000 000000 #ifndef _NETKAFKAH_ #define _NETKAFKAH_ #include #include #include #define ERRSTR_SIZE 1024 #ifndef DEBUGLF #define DEBUGLF( flag, level, fmt, ... ) \ do { if ( flag >= level ) fprintf( stderr, "KafkaXS: " fmt "\n", ##__VA_ARGS__ ); } while (0) #endif #define DEBUGF( flag, fmt, ... ) DEBUGLF( flag, 1, fmt, ##__VA_ARGS__ ) #define DEBUG2F( flag, fmt, ... ) DEBUGLF( flag, 2, fmt, ##__VA_ARGS__ ) #define DEBUG3F( flag, fmt, ... ) DEBUGLF( flag, 3, fmt, ##__VA_ARGS__ ) typedef struct { SV *self; rd_kafka_t *rk; rd_kafka_queue_t *queue; IV thx; int type; int debug_xs; SV *rebalance_cb; SV *commit_cb; SV *error_cb; SV *stats_cb; int queue_fd; int is_closed; } plrd_kafka_t; void krd_close_handles(plrd_kafka_t *rdk); rd_kafka_conf_t* krd_parse_config(pTHX_ plrd_kafka_t *krd, HV* params); void prd_init(plrd_kafka_t *krd, rd_kafka_conf_t *conf); void cns_init(plrd_kafka_t *ctl, rd_kafka_conf_t *conf); void prd_start(plrd_kafka_t *ctl); void cns_start(plrd_kafka_t *ctl); void prd_stop(plrd_kafka_t *ctl); void cns_stop(plrd_kafka_t* ctl); #endif Net-Kafka-1.06/PaxHeader/ppport.h000644 777777 777777 00000000300 13554325634 022156 xustar00amironovCORPAD\Domain Users000000 000000 17 gid=697783653 18 uid=2117034315 30 mtime=1571924892.690121291 30 ctime=1572341356.070981799 30 atime=1572341356.875783449 23 SCHILY.dev=16777220 26 SCHILY.ino=12902123333 18 SCHILY.nlink=3 Net-Kafka-1.06/ppport.h000644 €~/aK€)—Ue00000237542 13554325634 020630 0ustar00amironovCORPAD\Domain Users000000 000000 #if 0 <<'SKIP'; #endif /* ---------------------------------------------------------------------- ppport.h -- Perl/Pollution/Portability Version 3.32 Automatically created by Devel::PPPort running under perl 5.024000. Version 3.x, Copyright (c) 2004-2013, Marcus Holland-Moritz. Version 2.x, Copyright (C) 2001, Paul Marquess. Version 1.x, Copyright (C) 1999, Kenneth Albanowski. This program is free software; you can redistribute it and/or modify it under the same terms as Perl itself. ---------------------------------------------------------------------- SKIP if (@ARGV && $ARGV[0] eq '--unstrip') { eval { require Devel::PPPort }; $@ and die "Cannot require Devel::PPPort, please install.\n"; if (eval $Devel::PPPort::VERSION < 3.32) { die "ppport.h was originally generated with Devel::PPPort 3.32.\n" . "Your Devel::PPPort is only version $Devel::PPPort::VERSION.\n" . "Please install a newer version, or --unstrip will not work.\n"; } Devel::PPPort::WriteFile($0); exit 0; } print < #endif #if !(defined(PERL_VERSION) || (defined(SUBVERSION) && defined(PATCHLEVEL))) #include #endif #ifndef PERL_REVISION #define PERL_REVISION (5) #define PERL_VERSION PATCHLEVEL #define PERL_SUBVERSION SUBVERSION #endif #endif #define _dpppDEC2BCD(dec) ((((dec)/100)<<8)|((((dec)%100)/10)<<4)|((dec)%10)) #define PERL_BCDVERSION ((_dpppDEC2BCD(PERL_REVISION)<<24)|(_dpppDEC2BCD(PERL_VERSION)<<12)|_dpppDEC2BCD(PERL_SUBVERSION)) #if PERL_REVISION != 5 #error ppport.h only works with Perl version 5 #endif #ifndef dTHR #define dTHR dNOOP #endif #ifndef dTHX #define dTHX dNOOP #endif #ifndef dTHXa #define dTHXa(x) dNOOP #endif #ifndef pTHX #define pTHX void #endif #ifndef pTHX_ #define pTHX_ #endif #ifndef aTHX #define aTHX #endif #ifndef aTHX_ #define aTHX_ #endif #if (PERL_BCDVERSION < 0x5006000) #ifdef USE_THREADS #define aTHXR thr #define aTHXR_ thr, #else #define aTHXR #define aTHXR_ #endif #define dTHXR dTHR #else #define aTHXR aTHX #define aTHXR_ aTHX_ #define dTHXR dTHX #endif #ifndef dTHXoa #define dTHXoa(x) dTHXa(x) #endif #ifdef I_LIMITS #include #endif #ifndef PERL_UCHAR_MIN #define PERL_UCHAR_MIN ((unsigned char)0) #endif #ifndef PERL_UCHAR_MAX #ifdef UCHAR_MAX #define PERL_UCHAR_MAX ((unsigned char)UCHAR_MAX) #else #ifdef MAXUCHAR #define PERL_UCHAR_MAX ((unsigned char)MAXUCHAR) #else #define PERL_UCHAR_MAX ((unsigned char)~(unsigned)0) #endif #endif #endif #ifndef PERL_USHORT_MIN #define PERL_USHORT_MIN ((unsigned short)0) #endif #ifndef PERL_USHORT_MAX #ifdef USHORT_MAX #define PERL_USHORT_MAX ((unsigned short)USHORT_MAX) #else #ifdef MAXUSHORT #define PERL_USHORT_MAX ((unsigned short)MAXUSHORT) #else #ifdef USHRT_MAX #define PERL_USHORT_MAX ((unsigned short)USHRT_MAX) #else #define PERL_USHORT_MAX ((unsigned short)~(unsigned)0) #endif #endif #endif #endif #ifndef PERL_SHORT_MAX #ifdef SHORT_MAX #define PERL_SHORT_MAX ((short)SHORT_MAX) #else #ifdef MAXSHORT #define PERL_SHORT_MAX ((short)MAXSHORT) #else #ifdef SHRT_MAX #define PERL_SHORT_MAX ((short)SHRT_MAX) #else #define PERL_SHORT_MAX ((short) (PERL_USHORT_MAX >> 1)) #endif #endif #endif #endif #ifndef PERL_SHORT_MIN #ifdef SHORT_MIN #define PERL_SHORT_MIN ((short)SHORT_MIN) #else #ifdef MINSHORT #define PERL_SHORT_MIN ((short)MINSHORT) #else #ifdef SHRT_MIN #define PERL_SHORT_MIN ((short)SHRT_MIN) #else #define PERL_SHORT_MIN (-PERL_SHORT_MAX - ((3 & -1) == 3)) #endif #endif #endif #endif #ifndef PERL_UINT_MAX #ifdef UINT_MAX #define PERL_UINT_MAX ((unsigned int)UINT_MAX) #else #ifdef MAXUINT #define PERL_UINT_MAX ((unsigned int)MAXUINT) #else #define PERL_UINT_MAX (~(unsigned int)0) #endif #endif #endif #ifndef PERL_UINT_MIN #define PERL_UINT_MIN ((unsigned int)0) #endif #ifndef PERL_INT_MAX #ifdef INT_MAX #define PERL_INT_MAX ((int)INT_MAX) #else #ifdef MAXINT #define PERL_INT_MAX ((int)MAXINT) #else #define PERL_INT_MAX ((int)(PERL_UINT_MAX >> 1)) #endif #endif #endif #ifndef PERL_INT_MIN #ifdef INT_MIN #define PERL_INT_MIN ((int)INT_MIN) #else #ifdef MININT #define PERL_INT_MIN ((int)MININT) #else #define PERL_INT_MIN (-PERL_INT_MAX - ((3 & -1) == 3)) #endif #endif #endif #ifndef PERL_ULONG_MAX #ifdef ULONG_MAX #define PERL_ULONG_MAX ((unsigned long)ULONG_MAX) #else #ifdef MAXULONG #define PERL_ULONG_MAX ((unsigned long)MAXULONG) #else #define PERL_ULONG_MAX (~(unsigned long)0) #endif #endif #endif #ifndef PERL_ULONG_MIN #define PERL_ULONG_MIN ((unsigned long)0L) #endif #ifndef PERL_LONG_MAX #ifdef LONG_MAX #define PERL_LONG_MAX ((long)LONG_MAX) #else #ifdef MAXLONG #define PERL_LONG_MAX ((long)MAXLONG) #else #define PERL_LONG_MAX ((long) (PERL_ULONG_MAX >> 1)) #endif #endif #endif #ifndef PERL_LONG_MIN #ifdef LONG_MIN #define PERL_LONG_MIN ((long)LONG_MIN) #else #ifdef MINLONG #define PERL_LONG_MIN ((long)MINLONG) #else #define PERL_LONG_MIN (-PERL_LONG_MAX - ((3 & -1) == 3)) #endif #endif #endif #if defined(HAS_QUAD) && (defined(convex) || defined(uts)) #ifndef PERL_UQUAD_MAX #ifdef ULONGLONG_MAX #define PERL_UQUAD_MAX ((unsigned long long)ULONGLONG_MAX) #else #ifdef MAXULONGLONG #define PERL_UQUAD_MAX ((unsigned long long)MAXULONGLONG) #else #define PERL_UQUAD_MAX (~(unsigned long long)0) #endif #endif #endif #ifndef PERL_UQUAD_MIN #define PERL_UQUAD_MIN ((unsigned long long)0L) #endif #ifndef PERL_QUAD_MAX #ifdef LONGLONG_MAX #define PERL_QUAD_MAX ((long long)LONGLONG_MAX) #else #ifdef MAXLONGLONG #define PERL_QUAD_MAX ((long long)MAXLONGLONG) #else #define PERL_QUAD_MAX ((long long) (PERL_UQUAD_MAX >> 1)) #endif #endif #endif #ifndef PERL_QUAD_MIN #ifdef LONGLONG_MIN #define PERL_QUAD_MIN ((long long)LONGLONG_MIN) #else #ifdef MINLONGLONG #define PERL_QUAD_MIN ((long long)MINLONGLONG) #else #define PERL_QUAD_MIN (-PERL_QUAD_MAX - ((3 & -1) == 3)) #endif #endif #endif #endif #ifdef HAS_QUAD #ifdef cray #ifndef IVTYPE #define IVTYPE int #endif #ifndef IV_MIN #define IV_MIN PERL_INT_MIN #endif #ifndef IV_MAX #define IV_MAX PERL_INT_MAX #endif #ifndef UV_MIN #define UV_MIN PERL_UINT_MIN #endif #ifndef UV_MAX #define UV_MAX PERL_UINT_MAX #endif #ifdef INTSIZE #ifndef IVSIZE #define IVSIZE INTSIZE #endif #endif #else #if defined(convex) || defined(uts) #ifndef IVTYPE #define IVTYPE long long #endif #ifndef IV_MIN #define IV_MIN PERL_QUAD_MIN #endif #ifndef IV_MAX #define IV_MAX PERL_QUAD_MAX #endif #ifndef UV_MIN #define UV_MIN PERL_UQUAD_MIN #endif #ifndef UV_MAX #define UV_MAX PERL_UQUAD_MAX #endif #ifdef LONGLONGSIZE #ifndef IVSIZE #define IVSIZE LONGLONGSIZE #endif #endif #else #ifndef IVTYPE #define IVTYPE long #endif #ifndef IV_MIN #define IV_MIN PERL_LONG_MIN #endif #ifndef IV_MAX #define IV_MAX PERL_LONG_MAX #endif #ifndef UV_MIN #define UV_MIN PERL_ULONG_MIN #endif #ifndef UV_MAX #define UV_MAX PERL_ULONG_MAX #endif #ifdef LONGSIZE #ifndef IVSIZE #define IVSIZE LONGSIZE #endif #endif #endif #endif #ifndef IVSIZE #define IVSIZE 8 #endif #ifndef LONGSIZE #define LONGSIZE 8 #endif #ifndef PERL_QUAD_MIN #define PERL_QUAD_MIN IV_MIN #endif #ifndef PERL_QUAD_MAX #define PERL_QUAD_MAX IV_MAX #endif #ifndef PERL_UQUAD_MIN #define PERL_UQUAD_MIN UV_MIN #endif #ifndef PERL_UQUAD_MAX #define PERL_UQUAD_MAX UV_MAX #endif #else #ifndef IVTYPE #define IVTYPE long #endif #ifndef LONGSIZE #define LONGSIZE 4 #endif #ifndef IV_MIN #define IV_MIN PERL_LONG_MIN #endif #ifndef IV_MAX #define IV_MAX PERL_LONG_MAX #endif #ifndef UV_MIN #define UV_MIN PERL_ULONG_MIN #endif #ifndef UV_MAX #define UV_MAX PERL_ULONG_MAX #endif #endif #ifndef IVSIZE #ifdef LONGSIZE #define IVSIZE LONGSIZE #else #define IVSIZE 4 #endif #endif #ifndef UVTYPE #define UVTYPE unsigned IVTYPE #endif #ifndef UVSIZE #define UVSIZE IVSIZE #endif #ifndef sv_setuv #define sv_setuv(sv, uv) \ STMT_START { \ UV TeMpUv = uv; \ if (TeMpUv <= IV_MAX) \ sv_setiv(sv, TeMpUv); \ else \ sv_setnv(sv, (double)TeMpUv); \ } STMT_END #endif #ifndef newSVuv #define newSVuv(uv) ((uv) <= IV_MAX ? newSViv((IV)uv) : newSVnv((NV)uv)) #endif #ifndef sv_2uv #define sv_2uv(sv) ((PL_Sv = (sv)), (UV) (SvNOK(PL_Sv) ? SvNV(PL_Sv) : sv_2nv(PL_Sv))) #endif #ifndef SvUVX #define SvUVX(sv) ((UV)SvIVX(sv)) #endif #ifndef SvUVXx #define SvUVXx(sv) SvUVX(sv) #endif #ifndef SvUV #define SvUV(sv) (SvIOK(sv) ? SvUVX(sv) : sv_2uv(sv)) #endif #ifndef SvUVx #define SvUVx(sv) ((PL_Sv = (sv)), SvUV(PL_Sv)) #endif #ifndef sv_uv #define sv_uv(sv) SvUVx(sv) #endif #if !defined(SvUOK) && defined(SvIOK_UV) #define SvUOK(sv) SvIOK_UV(sv) #endif #ifndef XST_mUV #define XST_mUV(i,v) (ST(i) = sv_2mortal(newSVuv(v)) ) #endif #ifndef XSRETURN_UV #define XSRETURN_UV(v) STMT_START { XST_mUV(0,v); XSRETURN(1); } STMT_END #endif #ifndef PUSHu #define PUSHu(u) STMT_START { sv_setuv(TARG, (UV)(u)); PUSHTARG; } STMT_END #endif #ifndef XPUSHu #define XPUSHu(u) STMT_START { sv_setuv(TARG, (UV)(u)); XPUSHTARG; } STMT_END #endif #ifdef HAS_MEMCMP #ifndef memNE #define memNE(s1,s2,l) (memcmp(s1,s2,l)) #endif #ifndef memEQ #define memEQ(s1,s2,l) (!memcmp(s1,s2,l)) #endif #else #ifndef memNE #define memNE(s1,s2,l) (bcmp(s1,s2,l)) #endif #ifndef memEQ #define memEQ(s1,s2,l) (!bcmp(s1,s2,l)) #endif #endif #ifndef memEQs #define memEQs(s1, l, s2) \ (sizeof(s2)-1 == l && memEQ(s1, (s2 ""), (sizeof(s2)-1))) #endif #ifndef memNEs #define memNEs(s1, l, s2) !memEQs(s1, l, s2) #endif #ifndef MoveD #define MoveD(s,d,n,t) memmove((char*)(d),(char*)(s), (n) * sizeof(t)) #endif #ifndef CopyD #define CopyD(s,d,n,t) memcpy((char*)(d),(char*)(s), (n) * sizeof(t)) #endif #ifdef HAS_MEMSET #ifndef ZeroD #define ZeroD(d,n,t) memzero((char*)(d), (n) * sizeof(t)) #endif #else #ifndef ZeroD #define ZeroD(d,n,t) ((void)memzero((char*)(d), (n) * sizeof(t)), d) #endif #endif #ifndef PoisonWith #define PoisonWith(d,n,t,b) (void)memset((char*)(d), (U8)(b), (n) * sizeof(t)) #endif #ifndef PoisonNew #define PoisonNew(d,n,t) PoisonWith(d,n,t,0xAB) #endif #ifndef PoisonFree #define PoisonFree(d,n,t) PoisonWith(d,n,t,0xEF) #endif #ifndef Poison #define Poison(d,n,t) PoisonFree(d,n,t) #endif #ifndef Newx #define Newx(v,n,t) New(0,v,n,t) #endif #ifndef Newxc #define Newxc(v,n,t,c) Newc(0,v,n,t,c) #endif #ifndef Newxz #define Newxz(v,n,t) Newz(0,v,n,t) #endif #ifndef PERL_UNUSED_DECL #ifdef HASATTRIBUTE #if (defined(__GNUC__) && defined(__cplusplus)) || defined(__INTEL_COMPILER) #define PERL_UNUSED_DECL #else #define PERL_UNUSED_DECL __attribute__((unused)) #endif #else #define PERL_UNUSED_DECL #endif #endif #ifndef PERL_UNUSED_ARG #if defined(lint) && defined(S_SPLINT_S) #include #define PERL_UNUSED_ARG(x) NOTE(ARGUNUSED(x)) #else #define PERL_UNUSED_ARG(x) ((void)x) #endif #endif #ifndef PERL_UNUSED_VAR #define PERL_UNUSED_VAR(x) ((void)x) #endif #ifndef PERL_UNUSED_CONTEXT #ifdef USE_ITHREADS #define PERL_UNUSED_CONTEXT PERL_UNUSED_ARG(my_perl) #else #define PERL_UNUSED_CONTEXT #endif #endif #ifndef NOOP #define NOOP (void)0 #endif #ifndef dNOOP #define dNOOP extern int Perl___notused PERL_UNUSED_DECL #endif #ifndef NVTYPE #if defined(USE_LONG_DOUBLE) && defined(HAS_LONG_DOUBLE) #define NVTYPE long double #else #define NVTYPE double #endif typedef NVTYPE NV; #endif #ifndef INT2PTR #if (IVSIZE == PTRSIZE) && (UVSIZE == PTRSIZE) #define PTRV UV #define INT2PTR(any,d) (any)(d) #else #if PTRSIZE == LONGSIZE #define PTRV unsigned long #else #define PTRV unsigned #endif #define INT2PTR(any,d) (any)(PTRV)(d) #endif #endif #ifndef PTR2ul #if PTRSIZE == LONGSIZE #define PTR2ul(p) (unsigned long)(p) #else #define PTR2ul(p) INT2PTR(unsigned long,p) #endif #endif #ifndef PTR2nat #define PTR2nat(p) (PTRV)(p) #endif #ifndef NUM2PTR #define NUM2PTR(any,d) (any)PTR2nat(d) #endif #ifndef PTR2IV #define PTR2IV(p) INT2PTR(IV,p) #endif #ifndef PTR2UV #define PTR2UV(p) INT2PTR(UV,p) #endif #ifndef PTR2NV #define PTR2NV(p) NUM2PTR(NV,p) #endif #undef START_EXTERN_C #undef END_EXTERN_C #undef EXTERN_C #ifdef __cplusplus #define START_EXTERN_C extern "C" { #define END_EXTERN_C } #define EXTERN_C extern "C" #else #define START_EXTERN_C #define END_EXTERN_C #define EXTERN_C extern #endif #if defined(PERL_GCC_PEDANTIC) #ifndef PERL_GCC_BRACE_GROUPS_FORBIDDEN #define PERL_GCC_BRACE_GROUPS_FORBIDDEN #endif #endif #if defined(__GNUC__) && !defined(PERL_GCC_BRACE_GROUPS_FORBIDDEN) && !defined(__cplusplus) #ifndef PERL_USE_GCC_BRACE_GROUPS #define PERL_USE_GCC_BRACE_GROUPS #endif #endif #undef STMT_START #undef STMT_END #ifdef PERL_USE_GCC_BRACE_GROUPS #define STMT_START (void)( #define STMT_END ) #else #if defined(VOIDFLAGS) && (VOIDFLAGS) && (defined(sun) || defined(__sun__)) && !defined(__GNUC__) #define STMT_START if (1) #define STMT_END else (void)0 #else #define STMT_START do #define STMT_END while (0) #endif #endif #ifndef boolSV #define boolSV(b) ((b) ? &PL_sv_yes : &PL_sv_no) #endif #ifndef DEFSV #define DEFSV GvSV(PL_defgv) #endif #ifndef SAVE_DEFSV #define SAVE_DEFSV SAVESPTR(GvSV(PL_defgv)) #endif #ifndef DEFSV_set #define DEFSV_set(sv) (DEFSV = (sv)) #endif #ifndef AvFILLp #define AvFILLp AvFILL #endif #ifndef ERRSV #define ERRSV get_sv("@",FALSE) #endif #ifndef gv_stashpvn #define gv_stashpvn(str,len,create) gv_stashpv(str,create) #endif #ifndef get_cv #define get_cv perl_get_cv #endif #ifndef get_sv #define get_sv perl_get_sv #endif #ifndef get_av #define get_av perl_get_av #endif #ifndef get_hv #define get_hv perl_get_hv #endif #ifndef dUNDERBAR #define dUNDERBAR dNOOP #endif #ifndef UNDERBAR #define UNDERBAR DEFSV #endif #ifndef dAX #define dAX I32 ax = MARK - PL_stack_base + 1 #endif #ifndef dITEMS #define dITEMS I32 items = SP - MARK #endif #ifndef dXSTARG #define dXSTARG SV * targ = sv_newmortal() #endif #ifndef dAXMARK #define dAXMARK I32 ax = POPMARK; \ register SV ** const mark = PL_stack_base + ax++ #endif #ifndef XSprePUSH #define XSprePUSH (sp = PL_stack_base + ax - 1) #endif #if (PERL_BCDVERSION < 0x5005000) #undef XSRETURN #define XSRETURN(off) \ STMT_START { \ PL_stack_sp = PL_stack_base + ax + ((off) - 1); \ return; \ } STMT_END #endif #ifndef XSPROTO #define XSPROTO(name) void name(pTHX_ CV* cv) #endif #ifndef SVfARG #define SVfARG(p) ((void*)(p)) #endif #ifndef PERL_ABS #define PERL_ABS(x) ((x) < 0 ? -(x) : (x)) #endif #ifndef dVAR #define dVAR dNOOP #endif #ifndef SVf #define SVf "_" #endif #ifndef UTF8_MAXBYTES #define UTF8_MAXBYTES UTF8_MAXLEN #endif #ifndef CPERLscope #define CPERLscope(x) x #endif #ifndef PERL_HASH #define PERL_HASH(hash,str,len) \ STMT_START { \ const char *s_PeRlHaSh = str; \ I32 i_PeRlHaSh = len; \ U32 hash_PeRlHaSh = 0; \ while (i_PeRlHaSh--) \ hash_PeRlHaSh = hash_PeRlHaSh * 33 + *s_PeRlHaSh++; \ (hash) = hash_PeRlHaSh; \ } STMT_END #endif #ifndef PERLIO_FUNCS_DECL #ifdef PERLIO_FUNCS_CONST #define PERLIO_FUNCS_DECL(funcs) const PerlIO_funcs funcs #define PERLIO_FUNCS_CAST(funcs) (PerlIO_funcs*)(funcs) #else #define PERLIO_FUNCS_DECL(funcs) PerlIO_funcs funcs #define PERLIO_FUNCS_CAST(funcs) (funcs) #endif #endif #if (PERL_BCDVERSION < 0x5009003) #ifdef ARGSproto typedef OP* (CPERLscope(*Perl_ppaddr_t))(ARGSproto); #else typedef OP* (CPERLscope(*Perl_ppaddr_t))(pTHX); #endif typedef OP* (CPERLscope(*Perl_check_t)) (pTHX_ OP*); #endif #ifndef isPSXSPC #define isPSXSPC(c) (isSPACE(c) || (c) == '\v') #endif #ifndef isBLANK #define isBLANK(c) ((c) == ' ' || (c) == '\t') #endif #ifdef EBCDIC #ifndef isALNUMC #define isALNUMC(c) isalnum(c) #endif #ifndef isASCII #define isASCII(c) isascii(c) #endif #ifndef isCNTRL #define isCNTRL(c) iscntrl(c) #endif #ifndef isGRAPH #define isGRAPH(c) isgraph(c) #endif #ifndef isPRINT #define isPRINT(c) isprint(c) #endif #ifndef isPUNCT #define isPUNCT(c) ispunct(c) #endif #ifndef isXDIGIT #define isXDIGIT(c) isxdigit(c) #endif #else #if (PERL_BCDVERSION < 0x5010000) #undef isPRINT #endif #ifdef HAS_QUAD #ifdef U64TYPE #define WIDEST_UTYPE U64TYPE #else #define WIDEST_UTYPE Quad_t #endif #else #define WIDEST_UTYPE U32 #endif #ifndef isALNUMC #define isALNUMC(c) (isALPHA(c) || isDIGIT(c)) #endif #ifndef isASCII #define isASCII(c) ((WIDEST_UTYPE) (c) <= 127) #endif #ifndef isCNTRL #define isCNTRL(c) ((WIDEST_UTYPE) (c) < ' ' || (c) == 127) #endif #ifndef isGRAPH #define isGRAPH(c) (isALNUM(c) || isPUNCT(c)) #endif #ifndef isPRINT #define isPRINT(c) (((c) >= 32 && (c) < 127)) #endif #ifndef isPUNCT #define isPUNCT(c) (((c) >= 33 && (c) <= 47) || ((c) >= 58 && (c) <= 64) || ((c) >= 91 && (c) <= 96) || ((c) >= 123 && (c) <= 126)) #endif #ifndef isXDIGIT #define isXDIGIT(c) (isDIGIT(c) || ((c) >= 'a' && (c) <= 'f') || ((c) >= 'A' && (c) <= 'F')) #endif #endif #if (PERL_BCDVERSION >= 0x5008000) #ifndef HeUTF8 #define HeUTF8(he) ((HeKLEN(he) == HEf_SVKEY) ? \ SvUTF8(HeKEY_sv(he)) : \ (U32)HeKUTF8(he)) #endif #endif #ifndef PERL_SIGNALS_UNSAFE_FLAG #define PERL_SIGNALS_UNSAFE_FLAG 0x0001 #if (PERL_BCDVERSION < 0x5008000) #define D_PPP_PERL_SIGNALS_INIT PERL_SIGNALS_UNSAFE_FLAG #else #define D_PPP_PERL_SIGNALS_INIT 0 #endif #if defined(NEED_PL_signals) static U32 DPPP_(my_PL_signals) = D_PPP_PERL_SIGNALS_INIT; #elif defined(NEED_PL_signals_GLOBAL) U32 DPPP_(my_PL_signals) = D_PPP_PERL_SIGNALS_INIT; #else extern U32 DPPP_(my_PL_signals); #endif #define PL_signals DPPP_(my_PL_signals) #endif #if (PERL_BCDVERSION <= 0x5005005) #define PL_ppaddr ppaddr #define PL_no_modify no_modify #endif #if (PERL_BCDVERSION <= 0x5004005) #define PL_DBsignal DBsignal #define PL_DBsingle DBsingle #define PL_DBsub DBsub #define PL_DBtrace DBtrace #define PL_Sv Sv #define PL_bufend bufend #define PL_bufptr bufptr #define PL_compiling compiling #define PL_copline copline #define PL_curcop curcop #define PL_curstash curstash #define PL_debstash debstash #define PL_defgv defgv #define PL_diehook diehook #define PL_dirty dirty #define PL_dowarn dowarn #define PL_errgv errgv #define PL_error_count error_count #define PL_expect expect #define PL_hexdigit hexdigit #define PL_hints hints #define PL_in_my in_my #define PL_laststatval laststatval #define PL_lex_state lex_state #define PL_lex_stuff lex_stuff #define PL_linestr linestr #define PL_na na #define PL_perl_destruct_level perl_destruct_level #define PL_perldb perldb #define PL_rsfp_filters rsfp_filters #define PL_rsfp rsfp #define PL_stack_base stack_base #define PL_stack_sp stack_sp #define PL_statcache statcache #define PL_stdingv stdingv #define PL_sv_arenaroot sv_arenaroot #define PL_sv_no sv_no #define PL_sv_undef sv_undef #define PL_sv_yes sv_yes #define PL_tainted tainted #define PL_tainting tainting #define PL_tokenbuf tokenbuf #endif #if (PERL_BCDVERSION >= 0x5009005) #ifdef DPPP_PL_parser_NO_DUMMY #define D_PPP_my_PL_parser_var(var) ((PL_parser ? PL_parser : \ (croak("panic: PL_parser == NULL in %s:%d", \ __FILE__, __LINE__), (yy_parser *) NULL))->var) #else #ifdef DPPP_PL_parser_NO_DUMMY_WARNING #define D_PPP_parser_dummy_warning(var) #else #define D_PPP_parser_dummy_warning(var) \ warn("warning: dummy PL_" #var " used in %s:%d", __FILE__, __LINE__), #endif #define D_PPP_my_PL_parser_var(var) ((PL_parser ? PL_parser : \ (D_PPP_parser_dummy_warning(var) &DPPP_(dummy_PL_parser)))->var) #if defined(NEED_PL_parser) static yy_parser DPPP_(dummy_PL_parser); #elif defined(NEED_PL_parser_GLOBAL) yy_parser DPPP_(dummy_PL_parser); #else extern yy_parser DPPP_(dummy_PL_parser); #endif #endif #define PL_expect D_PPP_my_PL_parser_var(expect) #define PL_copline D_PPP_my_PL_parser_var(copline) #define PL_rsfp D_PPP_my_PL_parser_var(rsfp) #define PL_rsfp_filters D_PPP_my_PL_parser_var(rsfp_filters) #define PL_linestr D_PPP_my_PL_parser_var(linestr) #define PL_bufptr D_PPP_my_PL_parser_var(bufptr) #define PL_bufend D_PPP_my_PL_parser_var(bufend) #define PL_lex_state D_PPP_my_PL_parser_var(lex_state) #define PL_lex_stuff D_PPP_my_PL_parser_var(lex_stuff) #define PL_tokenbuf D_PPP_my_PL_parser_var(tokenbuf) #define PL_in_my D_PPP_my_PL_parser_var(in_my) #define PL_in_my_stash D_PPP_my_PL_parser_var(in_my_stash) #define PL_error_count D_PPP_my_PL_parser_var(error_count) #else #define PL_parser ((void *) 1) #endif #ifndef mPUSHs #define mPUSHs(s) PUSHs(sv_2mortal(s)) #endif #ifndef PUSHmortal #define PUSHmortal PUSHs(sv_newmortal()) #endif #ifndef mPUSHp #define mPUSHp(p,l) sv_setpvn(PUSHmortal, (p), (l)) #endif #ifndef mPUSHn #define mPUSHn(n) sv_setnv(PUSHmortal, (NV)(n)) #endif #ifndef mPUSHi #define mPUSHi(i) sv_setiv(PUSHmortal, (IV)(i)) #endif #ifndef mPUSHu #define mPUSHu(u) sv_setuv(PUSHmortal, (UV)(u)) #endif #ifndef mXPUSHs #define mXPUSHs(s) XPUSHs(sv_2mortal(s)) #endif #ifndef XPUSHmortal #define XPUSHmortal XPUSHs(sv_newmortal()) #endif #ifndef mXPUSHp #define mXPUSHp(p,l) STMT_START { EXTEND(sp,1); sv_setpvn(PUSHmortal, (p), (l)); } STMT_END #endif #ifndef mXPUSHn #define mXPUSHn(n) STMT_START { EXTEND(sp,1); sv_setnv(PUSHmortal, (NV)(n)); } STMT_END #endif #ifndef mXPUSHi #define mXPUSHi(i) STMT_START { EXTEND(sp,1); sv_setiv(PUSHmortal, (IV)(i)); } STMT_END #endif #ifndef mXPUSHu #define mXPUSHu(u) STMT_START { EXTEND(sp,1); sv_setuv(PUSHmortal, (UV)(u)); } STMT_END #endif #ifndef call_sv #define call_sv perl_call_sv #endif #ifndef call_pv #define call_pv perl_call_pv #endif #ifndef call_argv #define call_argv perl_call_argv #endif #ifndef call_method #define call_method perl_call_method #endif #ifndef eval_sv #define eval_sv perl_eval_sv #endif #ifndef PERL_LOADMOD_DENY #define PERL_LOADMOD_DENY 0x1 #endif #ifndef PERL_LOADMOD_NOIMPORT #define PERL_LOADMOD_NOIMPORT 0x2 #endif #ifndef PERL_LOADMOD_IMPORT_OPS #define PERL_LOADMOD_IMPORT_OPS 0x4 #endif #ifndef G_METHOD #define G_METHOD 64 #ifdef call_sv #undef call_sv #endif #if (PERL_BCDVERSION < 0x5006000) #define call_sv(sv, flags) ((flags) & G_METHOD ? perl_call_method((char *) SvPV_nolen_const(sv), \ (flags) & ~G_METHOD) : perl_call_sv(sv, flags)) #else #define call_sv(sv, flags) ((flags) & G_METHOD ? Perl_call_method(aTHX_ (char *) SvPV_nolen_const(sv), \ (flags) & ~G_METHOD) : Perl_call_sv(aTHX_ sv, flags)) #endif #endif #ifndef eval_pv #if defined(NEED_eval_pv) static SV* DPPP_(my_eval_pv)(char *p, I32 croak_on_error); static #else extern SV* DPPP_(my_eval_pv)(char *p, I32 croak_on_error); #endif #ifdef eval_pv #undef eval_pv #endif #define eval_pv(a,b) DPPP_(my_eval_pv)(aTHX_ a,b) #define Perl_eval_pv DPPP_(my_eval_pv) #if defined(NEED_eval_pv) || defined(NEED_eval_pv_GLOBAL) SV* DPPP_(my_eval_pv)(char *p, I32 croak_on_error) { dSP; SV* sv = newSVpv(p, 0); PUSHMARK(sp); eval_sv(sv, G_SCALAR); SvREFCNT_dec(sv); SPAGAIN; sv = POPs; PUTBACK; if (croak_on_error && SvTRUE(GvSV(errgv))) croak(SvPVx(GvSV(errgv), na)); return sv; } #endif #endif #ifndef vload_module #if defined(NEED_vload_module) static void DPPP_(my_vload_module)(U32 flags, SV *name, SV *ver, va_list *args); static #else extern void DPPP_(my_vload_module)(U32 flags, SV *name, SV *ver, va_list *args); #endif #ifdef vload_module #undef vload_module #endif #define vload_module(a,b,c,d) DPPP_(my_vload_module)(aTHX_ a,b,c,d) #define Perl_vload_module DPPP_(my_vload_module) #if defined(NEED_vload_module) || defined(NEED_vload_module_GLOBAL) void DPPP_(my_vload_module)(U32 flags, SV *name, SV *ver, va_list *args) { dTHR; dVAR; OP *veop, *imop; OP * const modname = newSVOP(OP_CONST, 0, name); SvREADONLY_off(((SVOP*)modname)->op_sv); modname->op_private |= OPpCONST_BARE; if (ver) { veop = newSVOP(OP_CONST, 0, ver); } else veop = NULL; if (flags & PERL_LOADMOD_NOIMPORT) { imop = sawparens(newNULLLIST()); } else if (flags & PERL_LOADMOD_IMPORT_OPS) { imop = va_arg(*args, OP*); } else { SV *sv; imop = NULL; sv = va_arg(*args, SV*); while (sv) { imop = append_elem(OP_LIST, imop, newSVOP(OP_CONST, 0, sv)); sv = va_arg(*args, SV*); } } { const line_t ocopline = PL_copline; COP * const ocurcop = PL_curcop; const int oexpect = PL_expect; #if (PERL_BCDVERSION >= 0x5004000) utilize(!(flags & PERL_LOADMOD_DENY), start_subparse(FALSE, 0), veop, modname, imop); #elif (PERL_BCDVERSION > 0x5003000) utilize(!(flags & PERL_LOADMOD_DENY), start_subparse(), veop, modname, imop); #else utilize(!(flags & PERL_LOADMOD_DENY), start_subparse(), modname, imop); #endif PL_expect = oexpect; PL_copline = ocopline; PL_curcop = ocurcop; } } #endif #endif #ifndef load_module #if defined(NEED_load_module) static void DPPP_(my_load_module)(U32 flags, SV *name, SV *ver, ...); static #else extern void DPPP_(my_load_module)(U32 flags, SV *name, SV *ver, ...); #endif #ifdef load_module #undef load_module #endif #define load_module DPPP_(my_load_module) #define Perl_load_module DPPP_(my_load_module) #if defined(NEED_load_module) || defined(NEED_load_module_GLOBAL) void DPPP_(my_load_module)(U32 flags, SV *name, SV *ver, ...) { va_list args; va_start(args, ver); vload_module(flags, name, ver, &args); va_end(args); } #endif #endif #ifndef newRV_inc #define newRV_inc(sv) newRV(sv) #endif #ifndef newRV_noinc #if defined(NEED_newRV_noinc) static SV * DPPP_(my_newRV_noinc)(SV *sv); static #else extern SV * DPPP_(my_newRV_noinc)(SV *sv); #endif #ifdef newRV_noinc #undef newRV_noinc #endif #define newRV_noinc(a) DPPP_(my_newRV_noinc)(aTHX_ a) #define Perl_newRV_noinc DPPP_(my_newRV_noinc) #if defined(NEED_newRV_noinc) || defined(NEED_newRV_noinc_GLOBAL) SV * DPPP_(my_newRV_noinc)(SV *sv) { SV *rv = (SV *)newRV(sv); SvREFCNT_dec(sv); return rv; } #endif #endif #if (PERL_BCDVERSION < 0x5004063) && (PERL_BCDVERSION != 0x5004005) #if defined(NEED_newCONSTSUB) static void DPPP_(my_newCONSTSUB)(HV *stash, const char *name, SV *sv); static #else extern void DPPP_(my_newCONSTSUB)(HV *stash, const char *name, SV *sv); #endif #ifdef newCONSTSUB #undef newCONSTSUB #endif #define newCONSTSUB(a,b,c) DPPP_(my_newCONSTSUB)(aTHX_ a,b,c) #define Perl_newCONSTSUB DPPP_(my_newCONSTSUB) #if defined(NEED_newCONSTSUB) || defined(NEED_newCONSTSUB_GLOBAL) #define D_PPP_PL_copline PL_copline void DPPP_(my_newCONSTSUB)(HV *stash, const char *name, SV *sv) { U32 oldhints = PL_hints; HV *old_cop_stash = PL_curcop->cop_stash; HV *old_curstash = PL_curstash; line_t oldline = PL_curcop->cop_line; PL_curcop->cop_line = D_PPP_PL_copline; PL_hints &= ~HINT_BLOCK_SCOPE; if (stash) PL_curstash = PL_curcop->cop_stash = stash; newSUB( #if (PERL_BCDVERSION < 0x5003022) start_subparse(), #elif (PERL_BCDVERSION == 0x5003022) start_subparse(0), #else start_subparse(FALSE, 0), #endif newSVOP(OP_CONST, 0, newSVpv((char *) name, 0)), newSVOP(OP_CONST, 0, &PL_sv_no), newSTATEOP(0, Nullch, newSVOP(OP_CONST, 0, sv)) ); PL_hints = oldhints; PL_curcop->cop_stash = old_cop_stash; PL_curstash = old_curstash; PL_curcop->cop_line = oldline; } #endif #endif #if defined(MULTIPLICITY) || defined(PERL_OBJECT) || \ defined(PERL_CAPI) || defined(PERL_IMPLICIT_CONTEXT) #ifndef START_MY_CXT #define START_MY_CXT #if (PERL_BCDVERSION < 0x5004068) #define dMY_CXT_SV \ SV *my_cxt_sv = get_sv(MY_CXT_KEY, FALSE) #else #define dMY_CXT_SV \ SV *my_cxt_sv = *hv_fetch(PL_modglobal, MY_CXT_KEY, \ sizeof(MY_CXT_KEY)-1, TRUE) #endif #define dMY_CXT \ dMY_CXT_SV; \ my_cxt_t *my_cxtp = INT2PTR(my_cxt_t*,SvUV(my_cxt_sv)) #define MY_CXT_INIT \ dMY_CXT_SV; \ \ my_cxt_t *my_cxtp = (my_cxt_t*)SvPVX(newSV(sizeof(my_cxt_t)-1));\ Zero(my_cxtp, 1, my_cxt_t); \ sv_setuv(my_cxt_sv, PTR2UV(my_cxtp)) #define MY_CXT (*my_cxtp) #define pMY_CXT my_cxt_t *my_cxtp #define pMY_CXT_ pMY_CXT, #define _pMY_CXT ,pMY_CXT #define aMY_CXT my_cxtp #define aMY_CXT_ aMY_CXT, #define _aMY_CXT ,aMY_CXT #endif #ifndef MY_CXT_CLONE #define MY_CXT_CLONE \ dMY_CXT_SV; \ my_cxt_t *my_cxtp = (my_cxt_t*)SvPVX(newSV(sizeof(my_cxt_t)-1));\ Copy(INT2PTR(my_cxt_t*, SvUV(my_cxt_sv)), my_cxtp, 1, my_cxt_t);\ sv_setuv(my_cxt_sv, PTR2UV(my_cxtp)) #endif #else #ifndef START_MY_CXT #define START_MY_CXT static my_cxt_t my_cxt; #define dMY_CXT_SV dNOOP #define dMY_CXT dNOOP #define MY_CXT_INIT NOOP #define MY_CXT my_cxt #define pMY_CXT void #define pMY_CXT_ #define _pMY_CXT #define aMY_CXT #define aMY_CXT_ #define _aMY_CXT #endif #ifndef MY_CXT_CLONE #define MY_CXT_CLONE NOOP #endif #endif #ifndef IVdf #if IVSIZE == LONGSIZE #define IVdf "ld" #define UVuf "lu" #define UVof "lo" #define UVxf "lx" #define UVXf "lX" #elif IVSIZE == INTSIZE #define IVdf "d" #define UVuf "u" #define UVof "o" #define UVxf "x" #define UVXf "X" #else #error "cannot define IV/UV formats" #endif #endif #ifndef NVef #if defined(USE_LONG_DOUBLE) && defined(HAS_LONG_DOUBLE) && \ defined(PERL_PRIfldbl) && (PERL_BCDVERSION != 0x5006000) #define NVef PERL_PRIeldbl #define NVff PERL_PRIfldbl #define NVgf PERL_PRIgldbl #else #define NVef "e" #define NVff "f" #define NVgf "g" #endif #endif #ifndef SvREFCNT_inc #ifdef PERL_USE_GCC_BRACE_GROUPS #define SvREFCNT_inc(sv) \ ({ \ SV * const _sv = (SV*)(sv); \ if (_sv) \ (SvREFCNT(_sv))++; \ _sv; \ }) #else #define SvREFCNT_inc(sv) \ ((PL_Sv=(SV*)(sv)) ? (++(SvREFCNT(PL_Sv)),PL_Sv) : NULL) #endif #endif #ifndef SvREFCNT_inc_simple #ifdef PERL_USE_GCC_BRACE_GROUPS #define SvREFCNT_inc_simple(sv) \ ({ \ if (sv) \ (SvREFCNT(sv))++; \ (SV *)(sv); \ }) #else #define SvREFCNT_inc_simple(sv) \ ((sv) ? (SvREFCNT(sv)++,(SV*)(sv)) : NULL) #endif #endif #ifndef SvREFCNT_inc_NN #ifdef PERL_USE_GCC_BRACE_GROUPS #define SvREFCNT_inc_NN(sv) \ ({ \ SV * const _sv = (SV*)(sv); \ SvREFCNT(_sv)++; \ _sv; \ }) #else #define SvREFCNT_inc_NN(sv) \ (PL_Sv=(SV*)(sv),++(SvREFCNT(PL_Sv)),PL_Sv) #endif #endif #ifndef SvREFCNT_inc_void #ifdef PERL_USE_GCC_BRACE_GROUPS #define SvREFCNT_inc_void(sv) \ ({ \ SV * const _sv = (SV*)(sv); \ if (_sv) \ (void)(SvREFCNT(_sv)++); \ }) #else #define SvREFCNT_inc_void(sv) \ (void)((PL_Sv=(SV*)(sv)) ? ++(SvREFCNT(PL_Sv)) : 0) #endif #endif #ifndef SvREFCNT_inc_simple_void #define SvREFCNT_inc_simple_void(sv) STMT_START { if (sv) SvREFCNT(sv)++; } STMT_END #endif #ifndef SvREFCNT_inc_simple_NN #define SvREFCNT_inc_simple_NN(sv) (++SvREFCNT(sv), (SV*)(sv)) #endif #ifndef SvREFCNT_inc_void_NN #define SvREFCNT_inc_void_NN(sv) (void)(++SvREFCNT((SV*)(sv))) #endif #ifndef SvREFCNT_inc_simple_void_NN #define SvREFCNT_inc_simple_void_NN(sv) (void)(++SvREFCNT((SV*)(sv))) #endif #ifndef newSV_type #if defined(NEED_newSV_type) static SV* DPPP_(my_newSV_type)(pTHX_ svtype const t); static #else extern SV* DPPP_(my_newSV_type)(pTHX_ svtype const t); #endif #ifdef newSV_type #undef newSV_type #endif #define newSV_type(a) DPPP_(my_newSV_type)(aTHX_ a) #define Perl_newSV_type DPPP_(my_newSV_type) #if defined(NEED_newSV_type) || defined(NEED_newSV_type_GLOBAL) SV* DPPP_(my_newSV_type)(pTHX_ svtype const t) { SV* const sv = newSV(0); sv_upgrade(sv, t); return sv; } #endif #endif #if (PERL_BCDVERSION < 0x5006000) #define D_PPP_CONSTPV_ARG(x) ((char *) (x)) #else #define D_PPP_CONSTPV_ARG(x) (x) #endif #ifndef newSVpvn #define newSVpvn(data,len) ((data) \ ? ((len) ? newSVpv((data), (len)) : newSVpv("", 0)) \ : newSV(0)) #endif #ifndef newSVpvn_utf8 #define newSVpvn_utf8(s, len, u) newSVpvn_flags((s), (len), (u) ? SVf_UTF8 : 0) #endif #ifndef SVf_UTF8 #define SVf_UTF8 0 #endif #ifndef newSVpvn_flags #if defined(NEED_newSVpvn_flags) static SV * DPPP_(my_newSVpvn_flags)(pTHX_ const char *s, STRLEN len, U32 flags); static #else extern SV * DPPP_(my_newSVpvn_flags)(pTHX_ const char *s, STRLEN len, U32 flags); #endif #ifdef newSVpvn_flags #undef newSVpvn_flags #endif #define newSVpvn_flags(a,b,c) DPPP_(my_newSVpvn_flags)(aTHX_ a,b,c) #define Perl_newSVpvn_flags DPPP_(my_newSVpvn_flags) #if defined(NEED_newSVpvn_flags) || defined(NEED_newSVpvn_flags_GLOBAL) SV * DPPP_(my_newSVpvn_flags)(pTHX_ const char *s, STRLEN len, U32 flags) { SV *sv = newSVpvn(D_PPP_CONSTPV_ARG(s), len); SvFLAGS(sv) |= (flags & SVf_UTF8); return (flags & SVs_TEMP) ? sv_2mortal(sv) : sv; } #endif #endif #if !defined(NEED_sv_2pv_flags) && defined(NEED_sv_2pv_nolen) #define NEED_sv_2pv_flags #endif #if !defined(NEED_sv_2pv_flags_GLOBAL) && defined(NEED_sv_2pv_nolen_GLOBAL) #define NEED_sv_2pv_flags_GLOBAL #endif #ifndef sv_2pv_nolen #define sv_2pv_nolen(sv) SvPV_nolen(sv) #endif #ifdef SvPVbyte #if (PERL_BCDVERSION < 0x5007000) #if defined(NEED_sv_2pvbyte) static char * DPPP_(my_sv_2pvbyte)(pTHX_ SV *sv, STRLEN *lp); static #else extern char * DPPP_(my_sv_2pvbyte)(pTHX_ SV *sv, STRLEN *lp); #endif #ifdef sv_2pvbyte #undef sv_2pvbyte #endif #define sv_2pvbyte(a,b) DPPP_(my_sv_2pvbyte)(aTHX_ a,b) #define Perl_sv_2pvbyte DPPP_(my_sv_2pvbyte) #if defined(NEED_sv_2pvbyte) || defined(NEED_sv_2pvbyte_GLOBAL) char * DPPP_(my_sv_2pvbyte)(pTHX_ SV *sv, STRLEN *lp) { sv_utf8_downgrade(sv,0); return SvPV(sv,*lp); } #endif #undef SvPVbyte #define SvPVbyte(sv, lp) \ ((SvFLAGS(sv) & (SVf_POK|SVf_UTF8)) == (SVf_POK) \ ? ((lp = SvCUR(sv)), SvPVX(sv)) : sv_2pvbyte(sv, &lp)) #endif #else #define SvPVbyte SvPV #define sv_2pvbyte sv_2pv #endif #ifndef sv_2pvbyte_nolen #define sv_2pvbyte_nolen(sv) sv_2pv_nolen(sv) #endif #ifndef SV_IMMEDIATE_UNREF #define SV_IMMEDIATE_UNREF 0 #endif #ifndef SV_GMAGIC #define SV_GMAGIC 0 #endif #ifndef SV_COW_DROP_PV #define SV_COW_DROP_PV 0 #endif #ifndef SV_UTF8_NO_ENCODING #define SV_UTF8_NO_ENCODING 0 #endif #ifndef SV_NOSTEAL #define SV_NOSTEAL 0 #endif #ifndef SV_CONST_RETURN #define SV_CONST_RETURN 0 #endif #ifndef SV_MUTABLE_RETURN #define SV_MUTABLE_RETURN 0 #endif #ifndef SV_SMAGIC #define SV_SMAGIC 0 #endif #ifndef SV_HAS_TRAILING_NUL #define SV_HAS_TRAILING_NUL 0 #endif #ifndef SV_COW_SHARED_HASH_KEYS #define SV_COW_SHARED_HASH_KEYS 0 #endif #if (PERL_BCDVERSION < 0x5007002) #if defined(NEED_sv_2pv_flags) static char * DPPP_(my_sv_2pv_flags)(pTHX_ SV *sv, STRLEN *lp, I32 flags); static #else extern char * DPPP_(my_sv_2pv_flags)(pTHX_ SV *sv, STRLEN *lp, I32 flags); #endif #ifdef sv_2pv_flags #undef sv_2pv_flags #endif #define sv_2pv_flags(a,b,c) DPPP_(my_sv_2pv_flags)(aTHX_ a,b,c) #define Perl_sv_2pv_flags DPPP_(my_sv_2pv_flags) #if defined(NEED_sv_2pv_flags) || defined(NEED_sv_2pv_flags_GLOBAL) char * DPPP_(my_sv_2pv_flags)(pTHX_ SV *sv, STRLEN *lp, I32 flags) { STRLEN n_a = (STRLEN) flags; return sv_2pv(sv, lp ? lp : &n_a); } #endif #if defined(NEED_sv_pvn_force_flags) static char * DPPP_(my_sv_pvn_force_flags)(pTHX_ SV *sv, STRLEN *lp, I32 flags); static #else extern char * DPPP_(my_sv_pvn_force_flags)(pTHX_ SV *sv, STRLEN *lp, I32 flags); #endif #ifdef sv_pvn_force_flags #undef sv_pvn_force_flags #endif #define sv_pvn_force_flags(a,b,c) DPPP_(my_sv_pvn_force_flags)(aTHX_ a,b,c) #define Perl_sv_pvn_force_flags DPPP_(my_sv_pvn_force_flags) #if defined(NEED_sv_pvn_force_flags) || defined(NEED_sv_pvn_force_flags_GLOBAL) char * DPPP_(my_sv_pvn_force_flags)(pTHX_ SV *sv, STRLEN *lp, I32 flags) { STRLEN n_a = (STRLEN) flags; return sv_pvn_force(sv, lp ? lp : &n_a); } #endif #endif #if (PERL_BCDVERSION < 0x5008008) || ( (PERL_BCDVERSION >= 0x5009000) && (PERL_BCDVERSION < 0x5009003) ) #define DPPP_SVPV_NOLEN_LP_ARG &PL_na #else #define DPPP_SVPV_NOLEN_LP_ARG 0 #endif #ifndef SvPV_const #define SvPV_const(sv, lp) SvPV_flags_const(sv, lp, SV_GMAGIC) #endif #ifndef SvPV_mutable #define SvPV_mutable(sv, lp) SvPV_flags_mutable(sv, lp, SV_GMAGIC) #endif #ifndef SvPV_flags #define SvPV_flags(sv, lp, flags) \ ((SvFLAGS(sv) & (SVf_POK)) == SVf_POK \ ? ((lp = SvCUR(sv)), SvPVX(sv)) : sv_2pv_flags(sv, &lp, flags)) #endif #ifndef SvPV_flags_const #define SvPV_flags_const(sv, lp, flags) \ ((SvFLAGS(sv) & (SVf_POK)) == SVf_POK \ ? ((lp = SvCUR(sv)), SvPVX_const(sv)) : \ (const char*) sv_2pv_flags(sv, &lp, flags|SV_CONST_RETURN)) #endif #ifndef SvPV_flags_const_nolen #define SvPV_flags_const_nolen(sv, flags) \ ((SvFLAGS(sv) & (SVf_POK)) == SVf_POK \ ? SvPVX_const(sv) : \ (const char*) sv_2pv_flags(sv, DPPP_SVPV_NOLEN_LP_ARG, flags|SV_CONST_RETURN)) #endif #ifndef SvPV_flags_mutable #define SvPV_flags_mutable(sv, lp, flags) \ ((SvFLAGS(sv) & (SVf_POK)) == SVf_POK \ ? ((lp = SvCUR(sv)), SvPVX_mutable(sv)) : \ sv_2pv_flags(sv, &lp, flags|SV_MUTABLE_RETURN)) #endif #ifndef SvPV_force #define SvPV_force(sv, lp) SvPV_force_flags(sv, lp, SV_GMAGIC) #endif #ifndef SvPV_force_nolen #define SvPV_force_nolen(sv) SvPV_force_flags_nolen(sv, SV_GMAGIC) #endif #ifndef SvPV_force_mutable #define SvPV_force_mutable(sv, lp) SvPV_force_flags_mutable(sv, lp, SV_GMAGIC) #endif #ifndef SvPV_force_nomg #define SvPV_force_nomg(sv, lp) SvPV_force_flags(sv, lp, 0) #endif #ifndef SvPV_force_nomg_nolen #define SvPV_force_nomg_nolen(sv) SvPV_force_flags_nolen(sv, 0) #endif #ifndef SvPV_force_flags #define SvPV_force_flags(sv, lp, flags) \ ((SvFLAGS(sv) & (SVf_POK|SVf_THINKFIRST)) == SVf_POK \ ? ((lp = SvCUR(sv)), SvPVX(sv)) : sv_pvn_force_flags(sv, &lp, flags)) #endif #ifndef SvPV_force_flags_nolen #define SvPV_force_flags_nolen(sv, flags) \ ((SvFLAGS(sv) & (SVf_POK|SVf_THINKFIRST)) == SVf_POK \ ? SvPVX(sv) : sv_pvn_force_flags(sv, DPPP_SVPV_NOLEN_LP_ARG, flags)) #endif #ifndef SvPV_force_flags_mutable #define SvPV_force_flags_mutable(sv, lp, flags) \ ((SvFLAGS(sv) & (SVf_POK|SVf_THINKFIRST)) == SVf_POK \ ? ((lp = SvCUR(sv)), SvPVX_mutable(sv)) \ : sv_pvn_force_flags(sv, &lp, flags|SV_MUTABLE_RETURN)) #endif #ifndef SvPV_nolen #define SvPV_nolen(sv) \ ((SvFLAGS(sv) & (SVf_POK)) == SVf_POK \ ? SvPVX(sv) : sv_2pv_flags(sv, DPPP_SVPV_NOLEN_LP_ARG, SV_GMAGIC)) #endif #ifndef SvPV_nolen_const #define SvPV_nolen_const(sv) \ ((SvFLAGS(sv) & (SVf_POK)) == SVf_POK \ ? SvPVX_const(sv) : sv_2pv_flags(sv, DPPP_SVPV_NOLEN_LP_ARG, SV_GMAGIC|SV_CONST_RETURN)) #endif #ifndef SvPV_nomg #define SvPV_nomg(sv, lp) SvPV_flags(sv, lp, 0) #endif #ifndef SvPV_nomg_const #define SvPV_nomg_const(sv, lp) SvPV_flags_const(sv, lp, 0) #endif #ifndef SvPV_nomg_const_nolen #define SvPV_nomg_const_nolen(sv) SvPV_flags_const_nolen(sv, 0) #endif #ifndef SvPV_nomg_nolen #define SvPV_nomg_nolen(sv) ((SvFLAGS(sv) & (SVf_POK)) == SVf_POK \ ? SvPVX(sv) : sv_2pv_flags(sv, DPPP_SVPV_NOLEN_LP_ARG, 0)) #endif #ifndef SvPV_renew #define SvPV_renew(sv,n) STMT_START { SvLEN_set(sv, n); \ SvPV_set((sv), (char *) saferealloc( \ (Malloc_t)SvPVX(sv), (MEM_SIZE)((n)))); \ } STMT_END #endif #ifndef SvMAGIC_set #define SvMAGIC_set(sv, val) \ STMT_START { assert(SvTYPE(sv) >= SVt_PVMG); \ (((XPVMG*) SvANY(sv))->xmg_magic = (val)); } STMT_END #endif #if (PERL_BCDVERSION < 0x5009003) #ifndef SvPVX_const #define SvPVX_const(sv) ((const char*) (0 + SvPVX(sv))) #endif #ifndef SvPVX_mutable #define SvPVX_mutable(sv) (0 + SvPVX(sv)) #endif #ifndef SvRV_set #define SvRV_set(sv, val) \ STMT_START { assert(SvTYPE(sv) >= SVt_RV); \ (((XRV*) SvANY(sv))->xrv_rv = (val)); } STMT_END #endif #else #ifndef SvPVX_const #define SvPVX_const(sv) ((const char*)((sv)->sv_u.svu_pv)) #endif #ifndef SvPVX_mutable #define SvPVX_mutable(sv) ((sv)->sv_u.svu_pv) #endif #ifndef SvRV_set #define SvRV_set(sv, val) \ STMT_START { assert(SvTYPE(sv) >= SVt_RV); \ ((sv)->sv_u.svu_rv = (val)); } STMT_END #endif #endif #ifndef SvSTASH_set #define SvSTASH_set(sv, val) \ STMT_START { assert(SvTYPE(sv) >= SVt_PVMG); \ (((XPVMG*) SvANY(sv))->xmg_stash = (val)); } STMT_END #endif #if (PERL_BCDVERSION < 0x5004000) #ifndef SvUV_set #define SvUV_set(sv, val) \ STMT_START { assert(SvTYPE(sv) == SVt_IV || SvTYPE(sv) >= SVt_PVIV); \ (((XPVIV*) SvANY(sv))->xiv_iv = (IV) (val)); } STMT_END #endif #else #ifndef SvUV_set #define SvUV_set(sv, val) \ STMT_START { assert(SvTYPE(sv) == SVt_IV || SvTYPE(sv) >= SVt_PVIV); \ (((XPVUV*) SvANY(sv))->xuv_uv = (val)); } STMT_END #endif #endif #if (PERL_BCDVERSION >= 0x5004000) && !defined(vnewSVpvf) #if defined(NEED_vnewSVpvf) static SV * DPPP_(my_vnewSVpvf)(pTHX_ const char *pat, va_list *args); static #else extern SV * DPPP_(my_vnewSVpvf)(pTHX_ const char *pat, va_list *args); #endif #ifdef vnewSVpvf #undef vnewSVpvf #endif #define vnewSVpvf(a,b) DPPP_(my_vnewSVpvf)(aTHX_ a,b) #define Perl_vnewSVpvf DPPP_(my_vnewSVpvf) #if defined(NEED_vnewSVpvf) || defined(NEED_vnewSVpvf_GLOBAL) SV * DPPP_(my_vnewSVpvf)(pTHX_ const char *pat, va_list *args) { register SV *sv = newSV(0); sv_vsetpvfn(sv, pat, strlen(pat), args, Null(SV**), 0, Null(bool*)); return sv; } #endif #endif #if (PERL_BCDVERSION >= 0x5004000) && !defined(sv_vcatpvf) #define sv_vcatpvf(sv, pat, args) sv_vcatpvfn(sv, pat, strlen(pat), args, Null(SV**), 0, Null(bool*)) #endif #if (PERL_BCDVERSION >= 0x5004000) && !defined(sv_vsetpvf) #define sv_vsetpvf(sv, pat, args) sv_vsetpvfn(sv, pat, strlen(pat), args, Null(SV**), 0, Null(bool*)) #endif #if (PERL_BCDVERSION >= 0x5004000) && !defined(sv_catpvf_mg) #if defined(NEED_sv_catpvf_mg) static void DPPP_(my_sv_catpvf_mg)(pTHX_ SV *sv, const char *pat, ...); static #else extern void DPPP_(my_sv_catpvf_mg)(pTHX_ SV *sv, const char *pat, ...); #endif #define Perl_sv_catpvf_mg DPPP_(my_sv_catpvf_mg) #if defined(NEED_sv_catpvf_mg) || defined(NEED_sv_catpvf_mg_GLOBAL) void DPPP_(my_sv_catpvf_mg)(pTHX_ SV *sv, const char *pat, ...) { va_list args; va_start(args, pat); sv_vcatpvfn(sv, pat, strlen(pat), &args, Null(SV**), 0, Null(bool*)); SvSETMAGIC(sv); va_end(args); } #endif #endif #ifdef PERL_IMPLICIT_CONTEXT #if (PERL_BCDVERSION >= 0x5004000) && !defined(sv_catpvf_mg_nocontext) #if defined(NEED_sv_catpvf_mg_nocontext) static void DPPP_(my_sv_catpvf_mg_nocontext)(SV *sv, const char *pat, ...); static #else extern void DPPP_(my_sv_catpvf_mg_nocontext)(SV *sv, const char *pat, ...); #endif #define sv_catpvf_mg_nocontext DPPP_(my_sv_catpvf_mg_nocontext) #define Perl_sv_catpvf_mg_nocontext DPPP_(my_sv_catpvf_mg_nocontext) #if defined(NEED_sv_catpvf_mg_nocontext) || defined(NEED_sv_catpvf_mg_nocontext_GLOBAL) void DPPP_(my_sv_catpvf_mg_nocontext)(SV *sv, const char *pat, ...) { dTHX; va_list args; va_start(args, pat); sv_vcatpvfn(sv, pat, strlen(pat), &args, Null(SV**), 0, Null(bool*)); SvSETMAGIC(sv); va_end(args); } #endif #endif #endif #ifndef sv_catpvf_mg #ifdef PERL_IMPLICIT_CONTEXT #define sv_catpvf_mg Perl_sv_catpvf_mg_nocontext #else #define sv_catpvf_mg Perl_sv_catpvf_mg #endif #endif #if (PERL_BCDVERSION >= 0x5004000) && !defined(sv_vcatpvf_mg) #define sv_vcatpvf_mg(sv, pat, args) \ STMT_START { \ sv_vcatpvfn(sv, pat, strlen(pat), args, Null(SV**), 0, Null(bool*)); \ SvSETMAGIC(sv); \ } STMT_END #endif #if (PERL_BCDVERSION >= 0x5004000) && !defined(sv_setpvf_mg) #if defined(NEED_sv_setpvf_mg) static void DPPP_(my_sv_setpvf_mg)(pTHX_ SV *sv, const char *pat, ...); static #else extern void DPPP_(my_sv_setpvf_mg)(pTHX_ SV *sv, const char *pat, ...); #endif #define Perl_sv_setpvf_mg DPPP_(my_sv_setpvf_mg) #if defined(NEED_sv_setpvf_mg) || defined(NEED_sv_setpvf_mg_GLOBAL) void DPPP_(my_sv_setpvf_mg)(pTHX_ SV *sv, const char *pat, ...) { va_list args; va_start(args, pat); sv_vsetpvfn(sv, pat, strlen(pat), &args, Null(SV**), 0, Null(bool*)); SvSETMAGIC(sv); va_end(args); } #endif #endif #ifdef PERL_IMPLICIT_CONTEXT #if (PERL_BCDVERSION >= 0x5004000) && !defined(sv_setpvf_mg_nocontext) #if defined(NEED_sv_setpvf_mg_nocontext) static void DPPP_(my_sv_setpvf_mg_nocontext)(SV *sv, const char *pat, ...); static #else extern void DPPP_(my_sv_setpvf_mg_nocontext)(SV *sv, const char *pat, ...); #endif #define sv_setpvf_mg_nocontext DPPP_(my_sv_setpvf_mg_nocontext) #define Perl_sv_setpvf_mg_nocontext DPPP_(my_sv_setpvf_mg_nocontext) #if defined(NEED_sv_setpvf_mg_nocontext) || defined(NEED_sv_setpvf_mg_nocontext_GLOBAL) void DPPP_(my_sv_setpvf_mg_nocontext)(SV *sv, const char *pat, ...) { dTHX; va_list args; va_start(args, pat); sv_vsetpvfn(sv, pat, strlen(pat), &args, Null(SV**), 0, Null(bool*)); SvSETMAGIC(sv); va_end(args); } #endif #endif #endif #ifndef sv_setpvf_mg #ifdef PERL_IMPLICIT_CONTEXT #define sv_setpvf_mg Perl_sv_setpvf_mg_nocontext #else #define sv_setpvf_mg Perl_sv_setpvf_mg #endif #endif #if (PERL_BCDVERSION >= 0x5004000) && !defined(sv_vsetpvf_mg) #define sv_vsetpvf_mg(sv, pat, args) \ STMT_START { \ sv_vsetpvfn(sv, pat, strlen(pat), args, Null(SV**), 0, Null(bool*)); \ SvSETMAGIC(sv); \ } STMT_END #endif #ifndef newSVpvn_share #if defined(NEED_newSVpvn_share) static SV * DPPP_(my_newSVpvn_share)(pTHX_ const char *src, I32 len, U32 hash); static #else extern SV * DPPP_(my_newSVpvn_share)(pTHX_ const char *src, I32 len, U32 hash); #endif #ifdef newSVpvn_share #undef newSVpvn_share #endif #define newSVpvn_share(a,b,c) DPPP_(my_newSVpvn_share)(aTHX_ a,b,c) #define Perl_newSVpvn_share DPPP_(my_newSVpvn_share) #if defined(NEED_newSVpvn_share) || defined(NEED_newSVpvn_share_GLOBAL) SV * DPPP_(my_newSVpvn_share)(pTHX_ const char *src, I32 len, U32 hash) { SV *sv; if (len < 0) len = -len; if (!hash) PERL_HASH(hash, (char*) src, len); sv = newSVpvn((char *) src, len); sv_upgrade(sv, SVt_PVIV); SvIVX(sv) = hash; SvREADONLY_on(sv); SvPOK_on(sv); return sv; } #endif #endif #ifndef SvSHARED_HASH #define SvSHARED_HASH(sv) (0 + SvUVX(sv)) #endif #ifndef HvNAME_get #define HvNAME_get(hv) HvNAME(hv) #endif #ifndef HvNAMELEN_get #define HvNAMELEN_get(hv) (HvNAME_get(hv) ? (I32)strlen(HvNAME_get(hv)) : 0) #endif #ifndef GvSVn #define GvSVn(gv) GvSV(gv) #endif #ifndef isGV_with_GP #define isGV_with_GP(gv) isGV(gv) #endif #ifndef gv_fetchpvn_flags #define gv_fetchpvn_flags(name, len, flags, svt) gv_fetchpv(name, flags, svt) #endif #ifndef gv_fetchsv #define gv_fetchsv(name, flags, svt) gv_fetchpv(SvPV_nolen_const(name), flags, svt) #endif #ifndef get_cvn_flags #define get_cvn_flags(name, namelen, flags) get_cv(name, flags) #endif #ifndef WARN_ALL #define WARN_ALL 0 #endif #ifndef WARN_CLOSURE #define WARN_CLOSURE 1 #endif #ifndef WARN_DEPRECATED #define WARN_DEPRECATED 2 #endif #ifndef WARN_EXITING #define WARN_EXITING 3 #endif #ifndef WARN_GLOB #define WARN_GLOB 4 #endif #ifndef WARN_IO #define WARN_IO 5 #endif #ifndef WARN_CLOSED #define WARN_CLOSED 6 #endif #ifndef WARN_EXEC #define WARN_EXEC 7 #endif #ifndef WARN_LAYER #define WARN_LAYER 8 #endif #ifndef WARN_NEWLINE #define WARN_NEWLINE 9 #endif #ifndef WARN_PIPE #define WARN_PIPE 10 #endif #ifndef WARN_UNOPENED #define WARN_UNOPENED 11 #endif #ifndef WARN_MISC #define WARN_MISC 12 #endif #ifndef WARN_NUMERIC #define WARN_NUMERIC 13 #endif #ifndef WARN_ONCE #define WARN_ONCE 14 #endif #ifndef WARN_OVERFLOW #define WARN_OVERFLOW 15 #endif #ifndef WARN_PACK #define WARN_PACK 16 #endif #ifndef WARN_PORTABLE #define WARN_PORTABLE 17 #endif #ifndef WARN_RECURSION #define WARN_RECURSION 18 #endif #ifndef WARN_REDEFINE #define WARN_REDEFINE 19 #endif #ifndef WARN_REGEXP #define WARN_REGEXP 20 #endif #ifndef WARN_SEVERE #define WARN_SEVERE 21 #endif #ifndef WARN_DEBUGGING #define WARN_DEBUGGING 22 #endif #ifndef WARN_INPLACE #define WARN_INPLACE 23 #endif #ifndef WARN_INTERNAL #define WARN_INTERNAL 24 #endif #ifndef WARN_MALLOC #define WARN_MALLOC 25 #endif #ifndef WARN_SIGNAL #define WARN_SIGNAL 26 #endif #ifndef WARN_SUBSTR #define WARN_SUBSTR 27 #endif #ifndef WARN_SYNTAX #define WARN_SYNTAX 28 #endif #ifndef WARN_AMBIGUOUS #define WARN_AMBIGUOUS 29 #endif #ifndef WARN_BAREWORD #define WARN_BAREWORD 30 #endif #ifndef WARN_DIGIT #define WARN_DIGIT 31 #endif #ifndef WARN_PARENTHESIS #define WARN_PARENTHESIS 32 #endif #ifndef WARN_PRECEDENCE #define WARN_PRECEDENCE 33 #endif #ifndef WARN_PRINTF #define WARN_PRINTF 34 #endif #ifndef WARN_PROTOTYPE #define WARN_PROTOTYPE 35 #endif #ifndef WARN_QW #define WARN_QW 36 #endif #ifndef WARN_RESERVED #define WARN_RESERVED 37 #endif #ifndef WARN_SEMICOLON #define WARN_SEMICOLON 38 #endif #ifndef WARN_TAINT #define WARN_TAINT 39 #endif #ifndef WARN_THREADS #define WARN_THREADS 40 #endif #ifndef WARN_UNINITIALIZED #define WARN_UNINITIALIZED 41 #endif #ifndef WARN_UNPACK #define WARN_UNPACK 42 #endif #ifndef WARN_UNTIE #define WARN_UNTIE 43 #endif #ifndef WARN_UTF8 #define WARN_UTF8 44 #endif #ifndef WARN_VOID #define WARN_VOID 45 #endif #ifndef WARN_ASSERTIONS #define WARN_ASSERTIONS 46 #endif #ifndef packWARN #define packWARN(a) (a) #endif #ifndef ckWARN #ifdef G_WARN_ON #define ckWARN(a) (PL_dowarn & G_WARN_ON) #else #define ckWARN(a) PL_dowarn #endif #endif #if (PERL_BCDVERSION >= 0x5004000) && !defined(warner) #if defined(NEED_warner) static void DPPP_(my_warner)(U32 err, const char *pat, ...); static #else extern void DPPP_(my_warner)(U32 err, const char *pat, ...); #endif #define Perl_warner DPPP_(my_warner) #if defined(NEED_warner) || defined(NEED_warner_GLOBAL) void DPPP_(my_warner)(U32 err, const char *pat, ...) { SV *sv; va_list args; PERL_UNUSED_ARG(err); va_start(args, pat); sv = vnewSVpvf(pat, &args); va_end(args); sv_2mortal(sv); warn("%s", SvPV_nolen(sv)); } #define warner Perl_warner #define Perl_warner_nocontext Perl_warner #endif #endif #ifndef STR_WITH_LEN #define STR_WITH_LEN(s) (s ""), (sizeof(s)-1) #endif #ifndef newSVpvs #define newSVpvs(str) newSVpvn(str "", sizeof(str) - 1) #endif #ifndef newSVpvs_flags #define newSVpvs_flags(str, flags) newSVpvn_flags(str "", sizeof(str) - 1, flags) #endif #ifndef newSVpvs_share #define newSVpvs_share(str) newSVpvn_share(str "", sizeof(str) - 1, 0) #endif #ifndef sv_catpvs #define sv_catpvs(sv, str) sv_catpvn(sv, str "", sizeof(str) - 1) #endif #ifndef sv_setpvs #define sv_setpvs(sv, str) sv_setpvn(sv, str "", sizeof(str) - 1) #endif #ifndef hv_fetchs #define hv_fetchs(hv, key, lval) hv_fetch(hv, key "", sizeof(key) - 1, lval) #endif #ifndef hv_stores #define hv_stores(hv, key, val) hv_store(hv, key "", sizeof(key) - 1, val, 0) #endif #ifndef gv_fetchpvs #define gv_fetchpvs(name, flags, svt) gv_fetchpvn_flags(name "", sizeof(name) - 1, flags, svt) #endif #ifndef gv_stashpvs #define gv_stashpvs(name, flags) gv_stashpvn(name "", sizeof(name) - 1, flags) #endif #ifndef get_cvs #define get_cvs(name, flags) get_cvn_flags(name "", sizeof(name)-1, flags) #endif #ifndef SvGETMAGIC #define SvGETMAGIC(x) STMT_START { if (SvGMAGICAL(x)) mg_get(x); } STMT_END #endif #ifndef HEf_SVKEY #define HEf_SVKEY -2 #endif #if defined(__GNUC__) && !defined(PERL_GCC_BRACE_GROUPS_FORBIDDEN) #define MUTABLE_PTR(p) ({ void *_p = (p); _p; }) #else #define MUTABLE_PTR(p) ((void *) (p)) #endif #define MUTABLE_SV(p) ((SV *)MUTABLE_PTR(p)) #ifndef PERL_MAGIC_sv #define PERL_MAGIC_sv '\0' #endif #ifndef PERL_MAGIC_overload #define PERL_MAGIC_overload 'A' #endif #ifndef PERL_MAGIC_overload_elem #define PERL_MAGIC_overload_elem 'a' #endif #ifndef PERL_MAGIC_overload_table #define PERL_MAGIC_overload_table 'c' #endif #ifndef PERL_MAGIC_bm #define PERL_MAGIC_bm 'B' #endif #ifndef PERL_MAGIC_regdata #define PERL_MAGIC_regdata 'D' #endif #ifndef PERL_MAGIC_regdatum #define PERL_MAGIC_regdatum 'd' #endif #ifndef PERL_MAGIC_env #define PERL_MAGIC_env 'E' #endif #ifndef PERL_MAGIC_envelem #define PERL_MAGIC_envelem 'e' #endif #ifndef PERL_MAGIC_fm #define PERL_MAGIC_fm 'f' #endif #ifndef PERL_MAGIC_regex_global #define PERL_MAGIC_regex_global 'g' #endif #ifndef PERL_MAGIC_isa #define PERL_MAGIC_isa 'I' #endif #ifndef PERL_MAGIC_isaelem #define PERL_MAGIC_isaelem 'i' #endif #ifndef PERL_MAGIC_nkeys #define PERL_MAGIC_nkeys 'k' #endif #ifndef PERL_MAGIC_dbfile #define PERL_MAGIC_dbfile 'L' #endif #ifndef PERL_MAGIC_dbline #define PERL_MAGIC_dbline 'l' #endif #ifndef PERL_MAGIC_mutex #define PERL_MAGIC_mutex 'm' #endif #ifndef PERL_MAGIC_shared #define PERL_MAGIC_shared 'N' #endif #ifndef PERL_MAGIC_shared_scalar #define PERL_MAGIC_shared_scalar 'n' #endif #ifndef PERL_MAGIC_collxfrm #define PERL_MAGIC_collxfrm 'o' #endif #ifndef PERL_MAGIC_tied #define PERL_MAGIC_tied 'P' #endif #ifndef PERL_MAGIC_tiedelem #define PERL_MAGIC_tiedelem 'p' #endif #ifndef PERL_MAGIC_tiedscalar #define PERL_MAGIC_tiedscalar 'q' #endif #ifndef PERL_MAGIC_qr #define PERL_MAGIC_qr 'r' #endif #ifndef PERL_MAGIC_sig #define PERL_MAGIC_sig 'S' #endif #ifndef PERL_MAGIC_sigelem #define PERL_MAGIC_sigelem 's' #endif #ifndef PERL_MAGIC_taint #define PERL_MAGIC_taint 't' #endif #ifndef PERL_MAGIC_uvar #define PERL_MAGIC_uvar 'U' #endif #ifndef PERL_MAGIC_uvar_elem #define PERL_MAGIC_uvar_elem 'u' #endif #ifndef PERL_MAGIC_vstring #define PERL_MAGIC_vstring 'V' #endif #ifndef PERL_MAGIC_vec #define PERL_MAGIC_vec 'v' #endif #ifndef PERL_MAGIC_utf8 #define PERL_MAGIC_utf8 'w' #endif #ifndef PERL_MAGIC_substr #define PERL_MAGIC_substr 'x' #endif #ifndef PERL_MAGIC_defelem #define PERL_MAGIC_defelem 'y' #endif #ifndef PERL_MAGIC_glob #define PERL_MAGIC_glob '*' #endif #ifndef PERL_MAGIC_arylen #define PERL_MAGIC_arylen '#' #endif #ifndef PERL_MAGIC_pos #define PERL_MAGIC_pos '.' #endif #ifndef PERL_MAGIC_backref #define PERL_MAGIC_backref '<' #endif #ifndef PERL_MAGIC_ext #define PERL_MAGIC_ext '~' #endif #ifndef sv_catpvn_nomg #define sv_catpvn_nomg sv_catpvn #endif #ifndef sv_catsv_nomg #define sv_catsv_nomg sv_catsv #endif #ifndef sv_setsv_nomg #define sv_setsv_nomg sv_setsv #endif #ifndef sv_pvn_nomg #define sv_pvn_nomg sv_pvn #endif #ifndef SvIV_nomg #define SvIV_nomg SvIV #endif #ifndef SvUV_nomg #define SvUV_nomg SvUV #endif #ifndef sv_catpv_mg #define sv_catpv_mg(sv, ptr) \ STMT_START { \ SV *TeMpSv = sv; \ sv_catpv(TeMpSv,ptr); \ SvSETMAGIC(TeMpSv); \ } STMT_END #endif #ifndef sv_catpvn_mg #define sv_catpvn_mg(sv, ptr, len) \ STMT_START { \ SV *TeMpSv = sv; \ sv_catpvn(TeMpSv,ptr,len); \ SvSETMAGIC(TeMpSv); \ } STMT_END #endif #ifndef sv_catsv_mg #define sv_catsv_mg(dsv, ssv) \ STMT_START { \ SV *TeMpSv = dsv; \ sv_catsv(TeMpSv,ssv); \ SvSETMAGIC(TeMpSv); \ } STMT_END #endif #ifndef sv_setiv_mg #define sv_setiv_mg(sv, i) \ STMT_START { \ SV *TeMpSv = sv; \ sv_setiv(TeMpSv,i); \ SvSETMAGIC(TeMpSv); \ } STMT_END #endif #ifndef sv_setnv_mg #define sv_setnv_mg(sv, num) \ STMT_START { \ SV *TeMpSv = sv; \ sv_setnv(TeMpSv,num); \ SvSETMAGIC(TeMpSv); \ } STMT_END #endif #ifndef sv_setpv_mg #define sv_setpv_mg(sv, ptr) \ STMT_START { \ SV *TeMpSv = sv; \ sv_setpv(TeMpSv,ptr); \ SvSETMAGIC(TeMpSv); \ } STMT_END #endif #ifndef sv_setpvn_mg #define sv_setpvn_mg(sv, ptr, len) \ STMT_START { \ SV *TeMpSv = sv; \ sv_setpvn(TeMpSv,ptr,len); \ SvSETMAGIC(TeMpSv); \ } STMT_END #endif #ifndef sv_setsv_mg #define sv_setsv_mg(dsv, ssv) \ STMT_START { \ SV *TeMpSv = dsv; \ sv_setsv(TeMpSv,ssv); \ SvSETMAGIC(TeMpSv); \ } STMT_END #endif #ifndef sv_setuv_mg #define sv_setuv_mg(sv, i) \ STMT_START { \ SV *TeMpSv = sv; \ sv_setuv(TeMpSv,i); \ SvSETMAGIC(TeMpSv); \ } STMT_END #endif #ifndef sv_usepvn_mg #define sv_usepvn_mg(sv, ptr, len) \ STMT_START { \ SV *TeMpSv = sv; \ sv_usepvn(TeMpSv,ptr,len); \ SvSETMAGIC(TeMpSv); \ } STMT_END #endif #ifndef SvVSTRING_mg #define SvVSTRING_mg(sv) (SvMAGICAL(sv) ? mg_find(sv, PERL_MAGIC_vstring) : NULL) #endif #if (PERL_BCDVERSION < 0x5004000) #elif (PERL_BCDVERSION < 0x5008000) #define sv_magic_portable(sv, obj, how, name, namlen) \ STMT_START { \ SV *SvMp_sv = (sv); \ char *SvMp_name = (char *) (name); \ I32 SvMp_namlen = (namlen); \ if (SvMp_name && SvMp_namlen == 0) \ { \ MAGIC *mg; \ sv_magic(SvMp_sv, obj, how, 0, 0); \ mg = SvMAGIC(SvMp_sv); \ mg->mg_len = -42; \ mg->mg_ptr = SvMp_name; \ } \ else \ { \ sv_magic(SvMp_sv, obj, how, SvMp_name, SvMp_namlen); \ } \ } STMT_END #else #define sv_magic_portable(a, b, c, d, e) sv_magic(a, b, c, d, e) #endif #if !defined(mg_findext) #if defined(NEED_mg_findext) static MAGIC * DPPP_(my_mg_findext)(SV * sv, int type, const MGVTBL *vtbl); static #else extern MAGIC * DPPP_(my_mg_findext)(SV * sv, int type, const MGVTBL *vtbl); #endif #define mg_findext DPPP_(my_mg_findext) #define Perl_mg_findext DPPP_(my_mg_findext) #if defined(NEED_mg_findext) || defined(NEED_mg_findext_GLOBAL) MAGIC * DPPP_(my_mg_findext)(SV * sv, int type, const MGVTBL *vtbl) { if (sv) { MAGIC *mg; #ifdef AvPAD_NAMELIST assert(!(SvTYPE(sv) == SVt_PVAV && AvPAD_NAMELIST(sv))); #endif for (mg = SvMAGIC (sv); mg; mg = mg->mg_moremagic) { if (mg->mg_type == type && mg->mg_virtual == vtbl) return mg; } } return NULL; } #endif #endif #if !defined(sv_unmagicext) #if defined(NEED_sv_unmagicext) static int DPPP_(my_sv_unmagicext)(pTHX_ SV * const sv, const int type, MGVTBL * vtbl); static #else extern int DPPP_(my_sv_unmagicext)(pTHX_ SV * const sv, const int type, MGVTBL * vtbl); #endif #ifdef sv_unmagicext #undef sv_unmagicext #endif #define sv_unmagicext(a,b,c) DPPP_(my_sv_unmagicext)(aTHX_ a,b,c) #define Perl_sv_unmagicext DPPP_(my_sv_unmagicext) #if defined(NEED_sv_unmagicext) || defined(NEED_sv_unmagicext_GLOBAL) int DPPP_(my_sv_unmagicext)(pTHX_ SV *const sv, const int type, MGVTBL *vtbl) { MAGIC* mg; MAGIC** mgp; if (SvTYPE(sv) < SVt_PVMG || !SvMAGIC(sv)) return 0; mgp = &(SvMAGIC(sv)); for (mg = *mgp; mg; mg = *mgp) { const MGVTBL* const virt = mg->mg_virtual; if (mg->mg_type == type && virt == vtbl) { *mgp = mg->mg_moremagic; if (virt && virt->svt_free) virt->svt_free(aTHX_ sv, mg); if (mg->mg_ptr && mg->mg_type != PERL_MAGIC_regex_global) { if (mg->mg_len > 0) Safefree(mg->mg_ptr); else if (mg->mg_len == HEf_SVKEY) SvREFCNT_dec(MUTABLE_SV(mg->mg_ptr)); else if (mg->mg_type == PERL_MAGIC_utf8) Safefree(mg->mg_ptr); } if (mg->mg_flags & MGf_REFCOUNTED) SvREFCNT_dec(mg->mg_obj); Safefree(mg); } else mgp = &mg->mg_moremagic; } if (SvMAGIC(sv)) { if (SvMAGICAL(sv)) mg_magical(sv); } else { SvMAGICAL_off(sv); SvFLAGS(sv) |= (SvFLAGS(sv) & (SVp_IOK|SVp_NOK|SVp_POK)) >> PRIVSHIFT; } return 0; } #endif #endif #ifdef USE_ITHREADS #ifndef CopFILE #define CopFILE(c) ((c)->cop_file) #endif #ifndef CopFILEGV #define CopFILEGV(c) (CopFILE(c) ? gv_fetchfile(CopFILE(c)) : Nullgv) #endif #ifndef CopFILE_set #define CopFILE_set(c,pv) ((c)->cop_file = savepv(pv)) #endif #ifndef CopFILESV #define CopFILESV(c) (CopFILE(c) ? GvSV(gv_fetchfile(CopFILE(c))) : Nullsv) #endif #ifndef CopFILEAV #define CopFILEAV(c) (CopFILE(c) ? GvAV(gv_fetchfile(CopFILE(c))) : Nullav) #endif #ifndef CopSTASHPV #define CopSTASHPV(c) ((c)->cop_stashpv) #endif #ifndef CopSTASHPV_set #define CopSTASHPV_set(c,pv) ((c)->cop_stashpv = ((pv) ? savepv(pv) : Nullch)) #endif #ifndef CopSTASH #define CopSTASH(c) (CopSTASHPV(c) ? gv_stashpv(CopSTASHPV(c),GV_ADD) : Nullhv) #endif #ifndef CopSTASH_set #define CopSTASH_set(c,hv) CopSTASHPV_set(c, (hv) ? HvNAME(hv) : Nullch) #endif #ifndef CopSTASH_eq #define CopSTASH_eq(c,hv) ((hv) && (CopSTASHPV(c) == HvNAME(hv) \ || (CopSTASHPV(c) && HvNAME(hv) \ && strEQ(CopSTASHPV(c), HvNAME(hv))))) #endif #else #ifndef CopFILEGV #define CopFILEGV(c) ((c)->cop_filegv) #endif #ifndef CopFILEGV_set #define CopFILEGV_set(c,gv) ((c)->cop_filegv = (GV*)SvREFCNT_inc(gv)) #endif #ifndef CopFILE_set #define CopFILE_set(c,pv) CopFILEGV_set((c), gv_fetchfile(pv)) #endif #ifndef CopFILESV #define CopFILESV(c) (CopFILEGV(c) ? GvSV(CopFILEGV(c)) : Nullsv) #endif #ifndef CopFILEAV #define CopFILEAV(c) (CopFILEGV(c) ? GvAV(CopFILEGV(c)) : Nullav) #endif #ifndef CopFILE #define CopFILE(c) (CopFILESV(c) ? SvPVX(CopFILESV(c)) : Nullch) #endif #ifndef CopSTASH #define CopSTASH(c) ((c)->cop_stash) #endif #ifndef CopSTASH_set #define CopSTASH_set(c,hv) ((c)->cop_stash = (hv)) #endif #ifndef CopSTASHPV #define CopSTASHPV(c) (CopSTASH(c) ? HvNAME(CopSTASH(c)) : Nullch) #endif #ifndef CopSTASHPV_set #define CopSTASHPV_set(c,pv) CopSTASH_set((c), gv_stashpv(pv,GV_ADD)) #endif #ifndef CopSTASH_eq #define CopSTASH_eq(c,hv) (CopSTASH(c) == (hv)) #endif #endif #if (PERL_BCDVERSION >= 0x5006000) #ifndef caller_cx #if defined(NEED_caller_cx) || defined(NEED_caller_cx_GLOBAL) static I32 DPPP_dopoptosub_at(const PERL_CONTEXT *cxstk, I32 startingblock) { I32 i; for (i = startingblock; i >= 0; i--) { register const PERL_CONTEXT * const cx = &cxstk[i]; switch (CxTYPE(cx)) { default: continue; case CXt_EVAL: case CXt_SUB: case CXt_FORMAT: return i; } } return i; } #endif #if defined(NEED_caller_cx) static const PERL_CONTEXT * DPPP_(my_caller_cx)(pTHX_ I32 count, const PERL_CONTEXT **dbcxp); static #else extern const PERL_CONTEXT * DPPP_(my_caller_cx)(pTHX_ I32 count, const PERL_CONTEXT **dbcxp); #endif #ifdef caller_cx #undef caller_cx #endif #define caller_cx(a,b) DPPP_(my_caller_cx)(aTHX_ a,b) #define Perl_caller_cx DPPP_(my_caller_cx) #if defined(NEED_caller_cx) || defined(NEED_caller_cx_GLOBAL) const PERL_CONTEXT * DPPP_(my_caller_cx)(pTHX_ I32 count, const PERL_CONTEXT **dbcxp) { register I32 cxix = DPPP_dopoptosub_at(cxstack, cxstack_ix); register const PERL_CONTEXT *cx; register const PERL_CONTEXT *ccstack = cxstack; const PERL_SI *top_si = PL_curstackinfo; for (;;) { while (cxix < 0 && top_si->si_type != PERLSI_MAIN) { top_si = top_si->si_prev; ccstack = top_si->si_cxstack; cxix = DPPP_dopoptosub_at(ccstack, top_si->si_cxix); } if (cxix < 0) return NULL; if (PL_DBsub && GvCV(PL_DBsub) && cxix >= 0 && ccstack[cxix].blk_sub.cv == GvCV(PL_DBsub)) count++; if (!count--) break; cxix = DPPP_dopoptosub_at(ccstack, cxix - 1); } cx = &ccstack[cxix]; if (dbcxp) *dbcxp = cx; if (CxTYPE(cx) == CXt_SUB || CxTYPE(cx) == CXt_FORMAT) { const I32 dbcxix = DPPP_dopoptosub_at(ccstack, cxix - 1); if (PL_DBsub && GvCV(PL_DBsub) && dbcxix >= 0 && ccstack[dbcxix].blk_sub.cv == GvCV(PL_DBsub)) cx = &ccstack[dbcxix]; } return cx; } #endif #endif #endif #ifndef IN_PERL_COMPILETIME #define IN_PERL_COMPILETIME (PL_curcop == &PL_compiling) #endif #ifndef IN_LOCALE_RUNTIME #define IN_LOCALE_RUNTIME (PL_curcop->op_private & HINT_LOCALE) #endif #ifndef IN_LOCALE_COMPILETIME #define IN_LOCALE_COMPILETIME (PL_hints & HINT_LOCALE) #endif #ifndef IN_LOCALE #define IN_LOCALE (IN_PERL_COMPILETIME ? IN_LOCALE_COMPILETIME : IN_LOCALE_RUNTIME) #endif #ifndef IS_NUMBER_IN_UV #define IS_NUMBER_IN_UV 0x01 #endif #ifndef IS_NUMBER_GREATER_THAN_UV_MAX #define IS_NUMBER_GREATER_THAN_UV_MAX 0x02 #endif #ifndef IS_NUMBER_NOT_INT #define IS_NUMBER_NOT_INT 0x04 #endif #ifndef IS_NUMBER_NEG #define IS_NUMBER_NEG 0x08 #endif #ifndef IS_NUMBER_INFINITY #define IS_NUMBER_INFINITY 0x10 #endif #ifndef IS_NUMBER_NAN #define IS_NUMBER_NAN 0x20 #endif #ifndef GROK_NUMERIC_RADIX #define GROK_NUMERIC_RADIX(sp, send) grok_numeric_radix(sp, send) #endif #ifndef PERL_SCAN_GREATER_THAN_UV_MAX #define PERL_SCAN_GREATER_THAN_UV_MAX 0x02 #endif #ifndef PERL_SCAN_SILENT_ILLDIGIT #define PERL_SCAN_SILENT_ILLDIGIT 0x04 #endif #ifndef PERL_SCAN_ALLOW_UNDERSCORES #define PERL_SCAN_ALLOW_UNDERSCORES 0x01 #endif #ifndef PERL_SCAN_DISALLOW_PREFIX #define PERL_SCAN_DISALLOW_PREFIX 0x02 #endif #ifndef grok_numeric_radix #if defined(NEED_grok_numeric_radix) static bool DPPP_(my_grok_numeric_radix)(pTHX_ const char ** sp, const char * send); static #else extern bool DPPP_(my_grok_numeric_radix)(pTHX_ const char ** sp, const char * send); #endif #ifdef grok_numeric_radix #undef grok_numeric_radix #endif #define grok_numeric_radix(a,b) DPPP_(my_grok_numeric_radix)(aTHX_ a,b) #define Perl_grok_numeric_radix DPPP_(my_grok_numeric_radix) #if defined(NEED_grok_numeric_radix) || defined(NEED_grok_numeric_radix_GLOBAL) bool DPPP_(my_grok_numeric_radix)(pTHX_ const char **sp, const char *send) { #ifdef USE_LOCALE_NUMERIC #ifdef PL_numeric_radix_sv if (PL_numeric_radix_sv && IN_LOCALE) { STRLEN len; char* radix = SvPV(PL_numeric_radix_sv, len); if (*sp + len <= send && memEQ(*sp, radix, len)) { *sp += len; return TRUE; } } #else #include dTHR; struct lconv *lc = localeconv(); char *radix = lc->decimal_point; if (radix && IN_LOCALE) { STRLEN len = strlen(radix); if (*sp + len <= send && memEQ(*sp, radix, len)) { *sp += len; return TRUE; } } #endif #endif if (*sp < send && **sp == '.') { ++*sp; return TRUE; } return FALSE; } #endif #endif #ifndef grok_number #if defined(NEED_grok_number) static int DPPP_(my_grok_number)(pTHX_ const char * pv, STRLEN len, UV * valuep); static #else extern int DPPP_(my_grok_number)(pTHX_ const char * pv, STRLEN len, UV * valuep); #endif #ifdef grok_number #undef grok_number #endif #define grok_number(a,b,c) DPPP_(my_grok_number)(aTHX_ a,b,c) #define Perl_grok_number DPPP_(my_grok_number) #if defined(NEED_grok_number) || defined(NEED_grok_number_GLOBAL) int DPPP_(my_grok_number)(pTHX_ const char *pv, STRLEN len, UV *valuep) { const char *s = pv; const char *send = pv + len; const UV max_div_10 = UV_MAX / 10; const char max_mod_10 = UV_MAX % 10; int numtype = 0; int sawinf = 0; int sawnan = 0; while (s < send && isSPACE(*s)) s++; if (s == send) { return 0; } else if (*s == '-') { s++; numtype = IS_NUMBER_NEG; } else if (*s == '+') s++; if (s == send) return 0; if (isDIGIT(*s)) { UV value = *s - '0'; if (++s < send) { int digit = *s - '0'; if (digit >= 0 && digit <= 9) { value = value * 10 + digit; if (++s < send) { digit = *s - '0'; if (digit >= 0 && digit <= 9) { value = value * 10 + digit; if (++s < send) { digit = *s - '0'; if (digit >= 0 && digit <= 9) { value = value * 10 + digit; if (++s < send) { digit = *s - '0'; if (digit >= 0 && digit <= 9) { value = value * 10 + digit; if (++s < send) { digit = *s - '0'; if (digit >= 0 && digit <= 9) { value = value * 10 + digit; if (++s < send) { digit = *s - '0'; if (digit >= 0 && digit <= 9) { value = value * 10 + digit; if (++s < send) { digit = *s - '0'; if (digit >= 0 && digit <= 9) { value = value * 10 + digit; if (++s < send) { digit = *s - '0'; if (digit >= 0 && digit <= 9) { value = value * 10 + digit; if (++s < send) { digit = *s - '0'; while (digit >= 0 && digit <= 9 && (value < max_div_10 || (value == max_div_10 && digit <= max_mod_10))) { value = value * 10 + digit; if (++s < send) digit = *s - '0'; else break; } if (digit >= 0 && digit <= 9 && (s < send)) { do { s++; } while (s < send && isDIGIT(*s)); numtype |= IS_NUMBER_GREATER_THAN_UV_MAX; goto skip_value; } } } } } } } } } } } } } } } } } } numtype |= IS_NUMBER_IN_UV; if (valuep) *valuep = value; skip_value: if (GROK_NUMERIC_RADIX(&s, send)) { numtype |= IS_NUMBER_NOT_INT; while (s < send && isDIGIT(*s)) s++; } } else if (GROK_NUMERIC_RADIX(&s, send)) { numtype |= IS_NUMBER_NOT_INT | IS_NUMBER_IN_UV; if (s < send && isDIGIT(*s)) { do { s++; } while (s < send && isDIGIT(*s)); if (valuep) { *valuep = 0; } } else return 0; } else if (*s == 'I' || *s == 'i') { s++; if (s == send || (*s != 'N' && *s != 'n')) return 0; s++; if (s == send || (*s != 'F' && *s != 'f')) return 0; s++; if (s < send && (*s == 'I' || *s == 'i')) { s++; if (s == send || (*s != 'N' && *s != 'n')) return 0; s++; if (s == send || (*s != 'I' && *s != 'i')) return 0; s++; if (s == send || (*s != 'T' && *s != 't')) return 0; s++; if (s == send || (*s != 'Y' && *s != 'y')) return 0; s++; } sawinf = 1; } else if (*s == 'N' || *s == 'n') { s++; if (s == send || (*s != 'A' && *s != 'a')) return 0; s++; if (s == send || (*s != 'N' && *s != 'n')) return 0; s++; sawnan = 1; } else return 0; if (sawinf) { numtype &= IS_NUMBER_NEG; numtype |= IS_NUMBER_INFINITY | IS_NUMBER_NOT_INT; } else if (sawnan) { numtype &= IS_NUMBER_NEG; numtype |= IS_NUMBER_NAN | IS_NUMBER_NOT_INT; } else if (s < send) { if (*s == 'e' || *s == 'E') { numtype &= IS_NUMBER_NEG; numtype |= IS_NUMBER_NOT_INT; s++; if (s < send && (*s == '-' || *s == '+')) s++; if (s < send && isDIGIT(*s)) { do { s++; } while (s < send && isDIGIT(*s)); } else return 0; } } while (s < send && isSPACE(*s)) s++; if (s >= send) return numtype; if (len == 10 && memEQ(pv, "0 but true", 10)) { if (valuep) *valuep = 0; return IS_NUMBER_IN_UV; } return 0; } #endif #endif #ifndef grok_bin #if defined(NEED_grok_bin) static UV DPPP_(my_grok_bin)(pTHX_ const char * start, STRLEN * len_p, I32 * flags, NV * result); static #else extern UV DPPP_(my_grok_bin)(pTHX_ const char * start, STRLEN * len_p, I32 * flags, NV * result); #endif #ifdef grok_bin #undef grok_bin #endif #define grok_bin(a,b,c,d) DPPP_(my_grok_bin)(aTHX_ a,b,c,d) #define Perl_grok_bin DPPP_(my_grok_bin) #if defined(NEED_grok_bin) || defined(NEED_grok_bin_GLOBAL) UV DPPP_(my_grok_bin)(pTHX_ const char *start, STRLEN *len_p, I32 *flags, NV *result) { const char *s = start; STRLEN len = *len_p; UV value = 0; NV value_nv = 0; const UV max_div_2 = UV_MAX / 2; bool allow_underscores = *flags & PERL_SCAN_ALLOW_UNDERSCORES; bool overflowed = FALSE; if (!(*flags & PERL_SCAN_DISALLOW_PREFIX)) { if (len >= 1) { if (s[0] == 'b') { s++; len--; } else if (len >= 2 && s[0] == '0' && s[1] == 'b') { s+=2; len-=2; } } } for (; len-- && *s; s++) { char bit = *s; if (bit == '0' || bit == '1') { redo: if (!overflowed) { if (value <= max_div_2) { value = (value << 1) | (bit - '0'); continue; } warn("Integer overflow in binary number"); overflowed = TRUE; value_nv = (NV) value; } value_nv *= 2.0; value_nv += (NV)(bit - '0'); continue; } if (bit == '_' && len && allow_underscores && (bit = s[1]) && (bit == '0' || bit == '1')) { --len; ++s; goto redo; } if (!(*flags & PERL_SCAN_SILENT_ILLDIGIT)) warn("Illegal binary digit '%c' ignored", *s); break; } if ( ( overflowed && value_nv > 4294967295.0) #if UVSIZE > 4 || (!overflowed && value > 0xffffffff ) #endif ) { warn("Binary number > 0b11111111111111111111111111111111 non-portable"); } *len_p = s - start; if (!overflowed) { *flags = 0; return value; } *flags = PERL_SCAN_GREATER_THAN_UV_MAX; if (result) *result = value_nv; return UV_MAX; } #endif #endif #ifndef grok_hex #if defined(NEED_grok_hex) static UV DPPP_(my_grok_hex)(pTHX_ const char * start, STRLEN * len_p, I32 * flags, NV * result); static #else extern UV DPPP_(my_grok_hex)(pTHX_ const char * start, STRLEN * len_p, I32 * flags, NV * result); #endif #ifdef grok_hex #undef grok_hex #endif #define grok_hex(a,b,c,d) DPPP_(my_grok_hex)(aTHX_ a,b,c,d) #define Perl_grok_hex DPPP_(my_grok_hex) #if defined(NEED_grok_hex) || defined(NEED_grok_hex_GLOBAL) UV DPPP_(my_grok_hex)(pTHX_ const char *start, STRLEN *len_p, I32 *flags, NV *result) { const char *s = start; STRLEN len = *len_p; UV value = 0; NV value_nv = 0; const UV max_div_16 = UV_MAX / 16; bool allow_underscores = *flags & PERL_SCAN_ALLOW_UNDERSCORES; bool overflowed = FALSE; const char *xdigit; if (!(*flags & PERL_SCAN_DISALLOW_PREFIX)) { if (len >= 1) { if (s[0] == 'x') { s++; len--; } else if (len >= 2 && s[0] == '0' && s[1] == 'x') { s+=2; len-=2; } } } for (; len-- && *s; s++) { xdigit = strchr((char *) PL_hexdigit, *s); if (xdigit) { redo: if (!overflowed) { if (value <= max_div_16) { value = (value << 4) | ((xdigit - PL_hexdigit) & 15); continue; } warn("Integer overflow in hexadecimal number"); overflowed = TRUE; value_nv = (NV) value; } value_nv *= 16.0; value_nv += (NV)((xdigit - PL_hexdigit) & 15); continue; } if (*s == '_' && len && allow_underscores && s[1] && (xdigit = strchr((char *) PL_hexdigit, s[1]))) { --len; ++s; goto redo; } if (!(*flags & PERL_SCAN_SILENT_ILLDIGIT)) warn("Illegal hexadecimal digit '%c' ignored", *s); break; } if ( ( overflowed && value_nv > 4294967295.0) #if UVSIZE > 4 || (!overflowed && value > 0xffffffff ) #endif ) { warn("Hexadecimal number > 0xffffffff non-portable"); } *len_p = s - start; if (!overflowed) { *flags = 0; return value; } *flags = PERL_SCAN_GREATER_THAN_UV_MAX; if (result) *result = value_nv; return UV_MAX; } #endif #endif #ifndef grok_oct #if defined(NEED_grok_oct) static UV DPPP_(my_grok_oct)(pTHX_ const char * start, STRLEN * len_p, I32 * flags, NV * result); static #else extern UV DPPP_(my_grok_oct)(pTHX_ const char * start, STRLEN * len_p, I32 * flags, NV * result); #endif #ifdef grok_oct #undef grok_oct #endif #define grok_oct(a,b,c,d) DPPP_(my_grok_oct)(aTHX_ a,b,c,d) #define Perl_grok_oct DPPP_(my_grok_oct) #if defined(NEED_grok_oct) || defined(NEED_grok_oct_GLOBAL) UV DPPP_(my_grok_oct)(pTHX_ const char *start, STRLEN *len_p, I32 *flags, NV *result) { const char *s = start; STRLEN len = *len_p; UV value = 0; NV value_nv = 0; const UV max_div_8 = UV_MAX / 8; bool allow_underscores = *flags & PERL_SCAN_ALLOW_UNDERSCORES; bool overflowed = FALSE; for (; len-- && *s; s++) { int digit = *s - '0'; if (digit >= 0 && digit <= 7) { redo: if (!overflowed) { if (value <= max_div_8) { value = (value << 3) | digit; continue; } warn("Integer overflow in octal number"); overflowed = TRUE; value_nv = (NV) value; } value_nv *= 8.0; value_nv += (NV)digit; continue; } if (digit == ('_' - '0') && len && allow_underscores && (digit = s[1] - '0') && (digit >= 0 && digit <= 7)) { --len; ++s; goto redo; } if (digit == 8 || digit == 9) { if (!(*flags & PERL_SCAN_SILENT_ILLDIGIT)) warn("Illegal octal digit '%c' ignored", *s); } break; } if ( ( overflowed && value_nv > 4294967295.0) #if UVSIZE > 4 || (!overflowed && value > 0xffffffff ) #endif ) { warn("Octal number > 037777777777 non-portable"); } *len_p = s - start; if (!overflowed) { *flags = 0; return value; } *flags = PERL_SCAN_GREATER_THAN_UV_MAX; if (result) *result = value_nv; return UV_MAX; } #endif #endif #if !defined(my_snprintf) #if defined(NEED_my_snprintf) static int DPPP_(my_my_snprintf)(char * buffer, const Size_t len, const char * format, ...); static #else extern int DPPP_(my_my_snprintf)(char * buffer, const Size_t len, const char * format, ...); #endif #define my_snprintf DPPP_(my_my_snprintf) #define Perl_my_snprintf DPPP_(my_my_snprintf) #if defined(NEED_my_snprintf) || defined(NEED_my_snprintf_GLOBAL) int DPPP_(my_my_snprintf)(char *buffer, const Size_t len, const char *format, ...) { dTHX; int retval; va_list ap; va_start(ap, format); #ifdef HAS_VSNPRINTF retval = vsnprintf(buffer, len, format, ap); #else retval = vsprintf(buffer, format, ap); #endif va_end(ap); if (retval < 0 || (len > 0 && (Size_t)retval >= len)) Perl_croak(aTHX_ "panic: my_snprintf buffer overflow"); return retval; } #endif #endif #if !defined(my_sprintf) #if defined(NEED_my_sprintf) static int DPPP_(my_my_sprintf)(char * buffer, const char * pat, ...); static #else extern int DPPP_(my_my_sprintf)(char * buffer, const char * pat, ...); #endif #define my_sprintf DPPP_(my_my_sprintf) #define Perl_my_sprintf DPPP_(my_my_sprintf) #if defined(NEED_my_sprintf) || defined(NEED_my_sprintf_GLOBAL) int DPPP_(my_my_sprintf)(char *buffer, const char* pat, ...) { va_list args; va_start(args, pat); vsprintf(buffer, pat, args); va_end(args); return strlen(buffer); } #endif #endif #ifdef NO_XSLOCKS #ifdef dJMPENV #define dXCPT dJMPENV; int rEtV = 0 #define XCPT_TRY_START JMPENV_PUSH(rEtV); if (rEtV == 0) #define XCPT_TRY_END JMPENV_POP; #define XCPT_CATCH if (rEtV != 0) #define XCPT_RETHROW JMPENV_JUMP(rEtV) #else #define dXCPT Sigjmp_buf oldTOP; int rEtV = 0 #define XCPT_TRY_START Copy(top_env, oldTOP, 1, Sigjmp_buf); rEtV = Sigsetjmp(top_env, 1); if (rEtV == 0) #define XCPT_TRY_END Copy(oldTOP, top_env, 1, Sigjmp_buf); #define XCPT_CATCH if (rEtV != 0) #define XCPT_RETHROW Siglongjmp(top_env, rEtV) #endif #endif #if !defined(my_strlcat) #if defined(NEED_my_strlcat) static Size_t DPPP_(my_my_strlcat)(char * dst, const char * src, Size_t size); static #else extern Size_t DPPP_(my_my_strlcat)(char * dst, const char * src, Size_t size); #endif #define my_strlcat DPPP_(my_my_strlcat) #define Perl_my_strlcat DPPP_(my_my_strlcat) #if defined(NEED_my_strlcat) || defined(NEED_my_strlcat_GLOBAL) Size_t DPPP_(my_my_strlcat)(char *dst, const char *src, Size_t size) { Size_t used, length, copy; used = strlen(dst); length = strlen(src); if (size > 0 && used < size - 1) { copy = (length >= size - used) ? size - used - 1 : length; memcpy(dst + used, src, copy); dst[used + copy] = '\0'; } return used + length; } #endif #endif #if !defined(my_strlcpy) #if defined(NEED_my_strlcpy) static Size_t DPPP_(my_my_strlcpy)(char * dst, const char * src, Size_t size); static #else extern Size_t DPPP_(my_my_strlcpy)(char * dst, const char * src, Size_t size); #endif #define my_strlcpy DPPP_(my_my_strlcpy) #define Perl_my_strlcpy DPPP_(my_my_strlcpy) #if defined(NEED_my_strlcpy) || defined(NEED_my_strlcpy_GLOBAL) Size_t DPPP_(my_my_strlcpy)(char *dst, const char *src, Size_t size) { Size_t length, copy; length = strlen(src); if (size > 0) { copy = (length >= size) ? size - 1 : length; memcpy(dst, src, copy); dst[copy] = '\0'; } return length; } #endif #endif #ifndef PERL_PV_ESCAPE_QUOTE #define PERL_PV_ESCAPE_QUOTE 0x0001 #endif #ifndef PERL_PV_PRETTY_QUOTE #define PERL_PV_PRETTY_QUOTE PERL_PV_ESCAPE_QUOTE #endif #ifndef PERL_PV_PRETTY_ELLIPSES #define PERL_PV_PRETTY_ELLIPSES 0x0002 #endif #ifndef PERL_PV_PRETTY_LTGT #define PERL_PV_PRETTY_LTGT 0x0004 #endif #ifndef PERL_PV_ESCAPE_FIRSTCHAR #define PERL_PV_ESCAPE_FIRSTCHAR 0x0008 #endif #ifndef PERL_PV_ESCAPE_UNI #define PERL_PV_ESCAPE_UNI 0x0100 #endif #ifndef PERL_PV_ESCAPE_UNI_DETECT #define PERL_PV_ESCAPE_UNI_DETECT 0x0200 #endif #ifndef PERL_PV_ESCAPE_ALL #define PERL_PV_ESCAPE_ALL 0x1000 #endif #ifndef PERL_PV_ESCAPE_NOBACKSLASH #define PERL_PV_ESCAPE_NOBACKSLASH 0x2000 #endif #ifndef PERL_PV_ESCAPE_NOCLEAR #define PERL_PV_ESCAPE_NOCLEAR 0x4000 #endif #ifndef PERL_PV_ESCAPE_RE #define PERL_PV_ESCAPE_RE 0x8000 #endif #ifndef PERL_PV_PRETTY_NOCLEAR #define PERL_PV_PRETTY_NOCLEAR PERL_PV_ESCAPE_NOCLEAR #endif #ifndef PERL_PV_PRETTY_DUMP #define PERL_PV_PRETTY_DUMP PERL_PV_PRETTY_ELLIPSES|PERL_PV_PRETTY_QUOTE #endif #ifndef PERL_PV_PRETTY_REGPROP #define PERL_PV_PRETTY_REGPROP PERL_PV_PRETTY_ELLIPSES|PERL_PV_PRETTY_LTGT|PERL_PV_ESCAPE_RE #endif #ifndef pv_escape #if defined(NEED_pv_escape) static char * DPPP_(my_pv_escape)(pTHX_ SV * dsv, char const * const str, const STRLEN count, const STRLEN max, STRLEN * const escaped, const U32 flags); static #else extern char * DPPP_(my_pv_escape)(pTHX_ SV * dsv, char const * const str, const STRLEN count, const STRLEN max, STRLEN * const escaped, const U32 flags); #endif #ifdef pv_escape #undef pv_escape #endif #define pv_escape(a,b,c,d,e,f) DPPP_(my_pv_escape)(aTHX_ a,b,c,d,e,f) #define Perl_pv_escape DPPP_(my_pv_escape) #if defined(NEED_pv_escape) || defined(NEED_pv_escape_GLOBAL) char * DPPP_(my_pv_escape)(pTHX_ SV *dsv, char const * const str, const STRLEN count, const STRLEN max, STRLEN * const escaped, const U32 flags) { const char esc = flags & PERL_PV_ESCAPE_RE ? '%' : '\\'; const char dq = flags & PERL_PV_ESCAPE_QUOTE ? '"' : esc; char octbuf[32] = "%123456789ABCDF"; STRLEN wrote = 0; STRLEN chsize = 0; STRLEN readsize = 1; #if defined(is_utf8_string) && defined(utf8_to_uvchr) bool isuni = flags & PERL_PV_ESCAPE_UNI ? 1 : 0; #endif const char *pv = str; const char * const end = pv + count; octbuf[0] = esc; if (!(flags & PERL_PV_ESCAPE_NOCLEAR)) sv_setpvs(dsv, ""); #if defined(is_utf8_string) && defined(utf8_to_uvchr) if ((flags & PERL_PV_ESCAPE_UNI_DETECT) && is_utf8_string((U8*)pv, count)) isuni = 1; #endif for (; pv < end && (!max || wrote < max) ; pv += readsize) { const UV u = #if defined(is_utf8_string) && defined(utf8_to_uvchr) isuni ? utf8_to_uvchr((U8*)pv, &readsize) : #endif (U8)*pv; const U8 c = (U8)u & 0xFF; if (u > 255 || (flags & PERL_PV_ESCAPE_ALL)) { if (flags & PERL_PV_ESCAPE_FIRSTCHAR) chsize = my_snprintf(octbuf, sizeof octbuf, "%" UVxf, u); else chsize = my_snprintf(octbuf, sizeof octbuf, "%cx{%" UVxf "}", esc, u); } else if (flags & PERL_PV_ESCAPE_NOBACKSLASH) { chsize = 1; } else { if (c == dq || c == esc || !isPRINT(c)) { chsize = 2; switch (c) { case '\\' : case '%' : if (c == esc) octbuf[1] = esc; else chsize = 1; break; case '\v' : octbuf[1] = 'v'; break; case '\t' : octbuf[1] = 't'; break; case '\r' : octbuf[1] = 'r'; break; case '\n' : octbuf[1] = 'n'; break; case '\f' : octbuf[1] = 'f'; break; case '"' : if (dq == '"') octbuf[1] = '"'; else chsize = 1; break; default: chsize = my_snprintf(octbuf, sizeof octbuf, pv < end && isDIGIT((U8)*(pv+readsize)) ? "%c%03o" : "%c%o", esc, c); } } else { chsize = 1; } } if (max && wrote + chsize > max) { break; } else if (chsize > 1) { sv_catpvn(dsv, octbuf, chsize); wrote += chsize; } else { char tmp[2]; my_snprintf(tmp, sizeof tmp, "%c", c); sv_catpvn(dsv, tmp, 1); wrote++; } if (flags & PERL_PV_ESCAPE_FIRSTCHAR) break; } if (escaped != NULL) *escaped= pv - str; return SvPVX(dsv); } #endif #endif #ifndef pv_pretty #if defined(NEED_pv_pretty) static char * DPPP_(my_pv_pretty)(pTHX_ SV * dsv, char const * const str, const STRLEN count, const STRLEN max, char const * const start_color, char const * const end_color, const U32 flags); static #else extern char * DPPP_(my_pv_pretty)(pTHX_ SV * dsv, char const * const str, const STRLEN count, const STRLEN max, char const * const start_color, char const * const end_color, const U32 flags); #endif #ifdef pv_pretty #undef pv_pretty #endif #define pv_pretty(a,b,c,d,e,f,g) DPPP_(my_pv_pretty)(aTHX_ a,b,c,d,e,f,g) #define Perl_pv_pretty DPPP_(my_pv_pretty) #if defined(NEED_pv_pretty) || defined(NEED_pv_pretty_GLOBAL) char * DPPP_(my_pv_pretty)(pTHX_ SV *dsv, char const * const str, const STRLEN count, const STRLEN max, char const * const start_color, char const * const end_color, const U32 flags) { const U8 dq = (flags & PERL_PV_PRETTY_QUOTE) ? '"' : '%'; STRLEN escaped; if (!(flags & PERL_PV_PRETTY_NOCLEAR)) sv_setpvs(dsv, ""); if (dq == '"') sv_catpvs(dsv, "\""); else if (flags & PERL_PV_PRETTY_LTGT) sv_catpvs(dsv, "<"); if (start_color != NULL) sv_catpv(dsv, D_PPP_CONSTPV_ARG(start_color)); pv_escape(dsv, str, count, max, &escaped, flags | PERL_PV_ESCAPE_NOCLEAR); if (end_color != NULL) sv_catpv(dsv, D_PPP_CONSTPV_ARG(end_color)); if (dq == '"') sv_catpvs(dsv, "\""); else if (flags & PERL_PV_PRETTY_LTGT) sv_catpvs(dsv, ">"); if ((flags & PERL_PV_PRETTY_ELLIPSES) && escaped < count) sv_catpvs(dsv, "..."); return SvPVX(dsv); } #endif #endif #ifndef pv_display #if defined(NEED_pv_display) static char * DPPP_(my_pv_display)(pTHX_ SV * dsv, const char * pv, STRLEN cur, STRLEN len, STRLEN pvlim); static #else extern char * DPPP_(my_pv_display)(pTHX_ SV * dsv, const char * pv, STRLEN cur, STRLEN len, STRLEN pvlim); #endif #ifdef pv_display #undef pv_display #endif #define pv_display(a,b,c,d,e) DPPP_(my_pv_display)(aTHX_ a,b,c,d,e) #define Perl_pv_display DPPP_(my_pv_display) #if defined(NEED_pv_display) || defined(NEED_pv_display_GLOBAL) char * DPPP_(my_pv_display)(pTHX_ SV *dsv, const char *pv, STRLEN cur, STRLEN len, STRLEN pvlim) { pv_pretty(dsv, pv, cur, pvlim, NULL, NULL, PERL_PV_PRETTY_DUMP); if (len > cur && pv[cur] == '\0') sv_catpvs(dsv, "\\0"); return SvPVX(dsv); } #endif #endif #endif Net-Kafka-1.06/PaxHeader/t000755 777777 777777 00000000300 13556003154 020642 xustar00amironovCORPAD\Domain Users000000 000000 17 gid=697783653 18 uid=2117034315 30 mtime=1572341356.230315334 30 ctime=1572341356.230315334 30 atime=1572341356.893386657 23 SCHILY.dev=16777220 26 SCHILY.ino=12902637718 18 SCHILY.nlink=8 Net-Kafka-1.06/t/000755 €~/aK€)—Ue00000000000 13556003154 017351 5ustar00amironovCORPAD\Domain Users000000 000000 Net-Kafka-1.06/PaxHeader/README000644 777777 777777 00000000300 13555571665 021351 xustar00amironovCORPAD\Domain Users000000 000000 17 gid=697783653 18 uid=2117034315 30 mtime=1572271029.433282734 30 ctime=1572341356.223873345 30 atime=1572341356.432225677 23 SCHILY.dev=16777220 26 SCHILY.ino=12902549999 18 SCHILY.nlink=2 Net-Kafka-1.06/README000644 €~/aK€)—Ue00000032657 13555571665 020023 0ustar00amironovCORPAD\Domain Users000000 000000 NAME Net::Kafka - High-performant Perl client for Apache Kafka SYNOPSIS use Net::Kafka::Producer; use Net::Kafka::Consumer; use AnyEvent; # Produce 1 message into "my_topic" my $condvar = AnyEvent->condvar; my $producer = Net::Kafka::Producer->new( 'bootstrap.servers' => 'localhost:9092' ); $producer->produce( payload => "message", topic => "my_topic" )->then(sub { my $delivery_report = shift; $condvar->send; print "Message successfully delivered with offset " . $delivery_report->{offset}; }, sub { my $error = shift; $condvar->send; die "Unable to produce a message: " . $error->{error} . ", code: " . $error->{code}; }); $condvar->recv; # Consume message from "my_topic" my $consumer = Net::Kafka::Consumer->new( 'bootstrap.servers' => 'localhost:9092', 'group.id' => 'my_consumer_group', 'enable.auto.commit' => 'true', ); $consumer->subscribe( [ "my_topic" ] ); while (1) { my $msg = $kafka->consumer_poll(1000); if ($msg) { if ( $msg->err ) { say "Error: ", Net::Kafka::Error::to_string($err); } else { say $msg->payload; } } } DESCRIPTION This module provides Perl bindings to librdkafka C client library. It is heavily inspired by Kafka::Librd module originally developed by Pavel Shaydo. Please refer to the following modules documentation in order to understand how to use it: * `Net::Kafka::Producer' - asynchronous producer interface * `Net::Kafka::Consumer' - consumer interface that supports both Simple and Distributed modes REQUIREMENTS * GNU make * librdkafka >= 1.0.0 INSTALLATION First install librdkafka (https://github.com/edenhill/librdkafka#installation). BUILD FROM CPAN cpanm install Net::Kafka BUILD FROM SOURCE Sources are available on Github: https://github.com/bookingcom/perl-Net-Kafka. perl Makefile.pl make make test make install Net::Kafka::Producer The Net::Kafka::Producer module provides interface to librdkafka's producer methods. It utilizes signal pipes, AnyEvent watcher and AnyEvent::XSPromises to make its behaviour asynchronous. Taking that into consideration you need to make sure to properly create condvar and `send'/`recv' in order to collect all outstanding promises. It is highly suggested to familirize yourself with both AnyEvent and AnyEvent::XSPromises modules. See SYNOPSIS for example. METHODS new() my $producer = Net::Kafka::Producer->new( 'bootstrap.servers' => 'localhost:9092' ); Create an instance of Net::Kafka::Producer. Accept hash where keys are equal to property names of librdkafka (see https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md) . Note that only `error_cb' and `stats_cb' callbacks are supported for Producer. Message delivery reports are served automatically through `Promise' based `produce' method (see below). produce() my $promise = $producer->produce( payload => "my_message", topic => "my_topic", key => "my_key", # optional timestamp => 1234567, # optional, if not specified current local timestamp will be used partition => 0 # optional, if not specified internal librdkafka partitioner will be used headers => $headers, # Optional, see Net::Kafka::Headers )->then(sub { my $delivery_report = shift; print "Message is sent with offset " . $delivery_report->{offset}; })->catch(sub { my $error = shift; print $error->{error} . "\n"; }); Sends a message to Kafka. Accepts hash with parameters. Returns back an instance of `Promise' that will be resolved/rejected later. In case message is successfully send "resolve" callback will receive a delievry report in the form of the hash that contains `offset', `partition' and `timestamp'. If message delivery has failed "reject" callback will receive a hash that contains `error' (a human readable error description) and (optionally) `error_code' that is equal to librdkafka's error code. All error codes are mapped and exported by `Net::Kafka' module as constants (e.g. `Net::Kafka::RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS') for simplicity. partitions_for() my $partitions = $producer->partitions_for("my_topic", $timeout_ms); Returns an `ARRAYREF' that contains partition metadata information about the given topic (leader, replicas, ISR replicas); close() $producer->close(); Explicitly closees `Net::Kafka::Producer' instance and underlying librdkafka handles. Net::Kafka::Consumer The Net::Kafka::Consumer class provides interface to librdkafka's consumer functionality. It supports both "distributed" (subscription based) and "simple" (manual partition assignment) modes of work. METHODS new() my $consumer = Net::Kafka::Consumer->new( 'bootstrap.servers' => 'localhost:9092', 'group.id' => "my_consumer_group", 'enable.auto.commit' => "true", ); Create an instance of Net::Kafka::Consumer. Accept hash where keys are equal to property names of librdkafka (see https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md) . Note that not all callbacks are supported at the moment. Supported ones are: `error_cb', `rebalance_cb', `commit_cb' and `stats_cb'. subscribe() $consumer->subscribe([ 'my_topic' ]); Subscribe to topic set using balanced consumer groups. The main entry-point for "distributed" consumer mode - partitions will be assigned automatically using Kafka's GroupApi semantics. Wildcard/regex topics are supported so matching topics will be added to the subscription list. unsubscribe() $consumer->unsubscribe(); Unsubscribe from the current subscription set. assign() # manually assign partitions 0 and 1 to be consumed my $tp_list = Net::Kafka::TopicPartitionList->new(); $tp_list->add("my_topic", 0); $tp_list->add("my_topic", 1); $consumer->assign($tp_list); Atomic assignment of partitions to consume. The main entry-point for "simple" consumer mode - partitions are assigned manually. poll() my $message = $consumer->poll($timeout_ms); Poll the consumer for messages or events. Returns instance of `Net::Kafka::Message'. Will block for at most `timeout_ms' milliseconds. An application should make sure to call `poll' at regular intervals. committed() my $tp_list = Net::Kafka::TopicPartitionList->new(); $tp_list->add("my_topic", 0); $consumer->committed($tp_list); my $offset = $tp_list->offset("my_topic_, 0); Retrieve committed offsets for topics+partitions. offsets_for_times() my $tp_list = Net::Kafka::TopicPartitionList->new(); $tp_list->add("my_topic", 0); $tp_list->set_offset("my_topic", 0, 958349923); # timestamp if passed through offset field $consumer->offsets_for_times($tp_list); my $offset = $tp_list->offset("my_topic"); Look up the offsets for the given partitions by timestamp. pause() my $tp_list = Net::Kafka::TopicPartitionList->new(); $tp_list->add("my_topic", 0); $consumer->pause($tp_list); # pauses consumption of partition 0 of "my_topic" Pause consumption for the provided list of partitions. resume() my $tp_list = Net::Kafka::TopicPartitionList->new(); $tp_list->add("my_topic", 0); $consumer->resume($tp_list); # resumes consumption of partition 0 of "my_topic" Resume consumption for the provided list of partitions. subscription() my $topics = $consumer->subscription(); Returns the current topic subscription partitions_for() my $partitions = $producer->partitions_for("my_topic"); Returns an `ARRAYREF' that contains partition metadata information about the given topic (leader, replicas, ISR replicas); commit() $consumer->commit(); # commit current partition assignment (blocking call) $consumer->commit(1); # commit current partition assignment (non-blocking call) my $tp_list = Net::Kafka::TopicPartitionList->new(); $tp_list->add("my_topic", 0); $tp_list->set_offset("my_topic", 0, 12345); $consumer->commit(0, $tp_list); # commit $tp_list assignment (blocking call); Commit offsets on broker for the provided list of partitions. If no partitions provided current assignment is committed instead. commit_message(); my $message = $consumer->poll(1000); $consumer->commit_message(0, $message); # commit message (blocking call); $consumer->commit_message(1, $message); # commit message (non-blocking call); Commit message's offset on broker for the message's partition. position() my $position_list = Net::Kafka::TopicPartitionList->new(); $position_list->add("my_topic", 0); $consumer->position($position_list); my $position = $position_list->offset("my_topic", 0); Retrieve current positions (offsets) for topics+partitions. The \p offset field of each requested partition will be set to the offset of the last consumed message + 1, or RD_KAFKA_OFFSET_INVALID in case there was no previous message. Note: in this context the last consumed message is the offset consumed by the current librdkafka instance and, in case of rebalancing, not necessarily the last message fetched from the partition. seek() $consumer->seek("my_topic", 0, 12345); # seek partition 0 of "my_topic" to offset "12345" $consumer->seek("my_topic", 0, RD_KAFKA_OFFSET_BEGINNING); # seek to the beginning of "my_topic" partition 0 $consumer->seek("my_topic", 0, RD_KAFKA_OFFSET_END); # seek to the end of "my_topic" partition 0 Seek consumer for topic+partition to offset which is either an absolute or logical offset. query_watermark_offsets() my ($low, $high) = $consumer->query_watermark_offsets("my_topic", 0); Queries Kafka Broker for lowest and highest watermark offsets in the given topic-partition. close() $consumer->close(); Close all consumer handles. Make sure to call it before destroying your application to make sure that all outstanding requests to be flushed. Net::Kafka::Message This class maps to `rd_kafka_message_t' structure from librdkafka and represents message or event. Objects of this class have the following methods: err() return error code from the message topic() return topic name partition() return partition number offset() return offset. Note, that the value is truncated to 32 bit if your perl doesn't support 64 bit integers. key() return message key payload() return message payload headers() return a copy of message headers detach_headers() return message headers and removes them from the message Net::Kafka::Headers This class contains a list of Kafka headers (it allows duplicates). Objects of this class have the following methods: new() create a new instance add(name, value) append a new name/value pair to the header list remove(name) remove all headers with the given name, if any get_last(name) return the last value associated with a given name to_hash() return an hash-of-arrays containing all headers Net::Kafka::Err This class provides static methods to convert error codes into names and descriptions. rd_kafka_get_err_descs() rd_kafka_get_err_descs() returns a hash mapping error codes to description strings. to_string() to_string($code) return the description string for this error code. to_name() to_name($code) return the name of this error code. CAVEATS Message offset is truncated to 32 bit if perl is compiled without support for 64 bit integers. SEE ALSO * https://github.com/edenhill/librdkafka * https://github.com/trinitum/perl-Kafka-Librd LICENSE AND COPYRIGHT Copyright (C) 2016, 2017 Pavel Shaydo Copyright (C) 2018, 2019 Booking.com This program is free software; you can redistribute it and/or modify it under the terms of either: the GNU General Public License as published by the Free Software Foundation; or the Artistic License. See http://dev.perl.org/licenses/ for more information. Net-Kafka-1.06/PaxHeader/README.md000644 777777 777777 00000000277 13555571665 021765 xustar00amironovCORPAD\Domain Users000000 000000 17 gid=697783653 18 uid=2117034315 30 mtime=1572271029.435631441 30 ctime=1572341356.222625644 29 atime=1572341356.43223916 23 SCHILY.dev=16777220 26 SCHILY.ino=12902550000 18 SCHILY.nlink=2 Net-Kafka-1.06/README.md000644 €~/aK€)—Ue00000032240 13555571665 020406 0ustar00amironovCORPAD\Domain Users000000 000000 # NAME [![Build Status](https://travis-ci.com/bookingcom/perl-Net-Kafka.svg?branch=master)](https://travis-ci.com/bookingcom/perl-Net-Kafka) Net::Kafka - High-performant Perl client for Apache Kafka # SYNOPSIS use Net::Kafka::Producer; use Net::Kafka::Consumer; use AnyEvent; # Produce 1 message into "my_topic" my $condvar = AnyEvent->condvar; my $producer = Net::Kafka::Producer->new( 'bootstrap.servers' => 'localhost:9092' ); $producer->produce( payload => "message", topic => "my_topic" )->then(sub { my $delivery_report = shift; $condvar->send; print "Message successfully delivered with offset " . $delivery_report->{offset}; }, sub { my $error = shift; $condvar->send; die "Unable to produce a message: " . $error->{error} . ", code: " . $error->{code}; }); $condvar->recv; # Consume message from "my_topic" my $consumer = Net::Kafka::Consumer->new( 'bootstrap.servers' => 'localhost:9092', 'group.id' => 'my_consumer_group', 'enable.auto.commit' => 'true', ); $consumer->subscribe( [ "my_topic" ] ); while (1) { my $msg = $kafka->consumer_poll(1000); if ($msg) { if ( $msg->err ) { say "Error: ", Net::Kafka::Error::to_string($err); } else { say $msg->payload; } } } # DESCRIPTION This module provides Perl bindings to [librdkafka](https://github.com/edenhill/librdkafka) C client library. It is heavily inspired by [Kafka::Librd](https://metacpan.org/pod/Kafka%3A%3ALibrd) module originally developed by Pavel Shaydo. Please refer to the following modules documentation in order to understand how to use it: - `Net::Kafka::Producer` - asynchronous producer interface - `Net::Kafka::Consumer` - consumer interface that supports both Simple and Distributed modes # REQUIREMENTS - GNU make - librdkafka >= 1.0.0 # INSTALLATION First install librdkafka ([https://github.com/edenhill/librdkafka#installation](https://github.com/edenhill/librdkafka#installation)). ## BUILD FROM CPAN cpanm install Net::Kafka ## BUILD FROM SOURCE Sources are available on Github: [https://github.com/bookingcom/perl-Net-Kafka](https://github.com/bookingcom/perl-Net-Kafka). perl Makefile.pl make make test make install # Net::Kafka::Producer The Net::Kafka::Producer module provides interface to librdkafka's producer methods. It utilizes signal pipes, [AnyEvent](https://metacpan.org/pod/AnyEvent) watcher and [AnyEvent::XSPromises](https://metacpan.org/pod/AnyEvent%3A%3AXSPromises) to make its behaviour asynchronous. Taking that into consideration you need to make sure to properly create condvar and `send`/`recv` in order to collect all outstanding promises. It is highly suggested to familirize yourself with both [AnyEvent](https://metacpan.org/pod/AnyEvent) and [AnyEvent::XSPromises](https://metacpan.org/pod/AnyEvent%3A%3AXSPromises) modules. See ["SYNOPSIS"](#synopsis) for example. ## METHODS - new() my $producer = Net::Kafka::Producer->new( 'bootstrap.servers' => 'localhost:9092' ); Create an instance of Net::Kafka::Producer. Accept hash where keys are equal to property names of librdkafka (see [https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md)). Note that only `error_cb` and `stats_cb` callbacks are supported for Producer. Message delivery reports are served automatically through `Promise` based `produce` method (see below). - produce() my $promise = $producer->produce( payload => "my_message", topic => "my_topic", key => "my_key", # optional timestamp => 1234567, # optional, if not specified current local timestamp will be used partition => 0 # optional, if not specified internal librdkafka partitioner will be used headers => $headers, # Optional, see Net::Kafka::Headers )->then(sub { my $delivery_report = shift; print "Message is sent with offset " . $delivery_report->{offset}; })->catch(sub { my $error = shift; print $error->{error} . "\n"; }); Sends a message to Kafka. Accepts hash with parameters. Returns back an instance of `Promise` that will be resolved/rejected later. In case message is successfully send "resolve" callback will receive a delievry report in the form of the hash that contains `offset`, `partition` and `timestamp`. If message delivery has failed "reject" callback will receive a hash that contains `error` (a human readable error description) and (optionally) `error_code` that is equal to librdkafka's error code. All error codes are mapped and exported by `Net::Kafka` module as constants (e.g. `Net::Kafka::RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS`) for simplicity. - partitions\_for() my $partitions = $producer->partitions_for("my_topic", $timeout_ms); Returns an `ARRAYREF` that contains partition metadata information about the given topic (leader, replicas, ISR replicas); - close() $producer->close(); Explicitly closees `Net::Kafka::Producer` instance and underlying librdkafka handles. # Net::Kafka::Consumer The Net::Kafka::Consumer class provides interface to librdkafka's consumer functionality. It supports both "distributed" (subscription based) and "simple" (manual partition assignment) modes of work. ## METHODS - new() my $consumer = Net::Kafka::Consumer->new( 'bootstrap.servers' => 'localhost:9092', 'group.id' => "my_consumer_group", 'enable.auto.commit' => "true", ); Create an instance of Net::Kafka::Consumer. Accept hash where keys are equal to property names of librdkafka (see [https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md)). Note that not all callbacks are supported at the moment. Supported ones are: `error_cb`, `rebalance_cb`, `commit_cb` and `stats_cb`. - subscribe() $consumer->subscribe([ 'my_topic' ]); Subscribe to topic set using balanced consumer groups. The main entry-point for "distributed" consumer mode - partitions will be assigned automatically using Kafka's GroupApi semantics. Wildcard/regex topics are supported so matching topics will be added to the subscription list. - unsubscribe() $consumer->unsubscribe(); Unsubscribe from the current subscription set. - assign() # manually assign partitions 0 and 1 to be consumed my $tp_list = Net::Kafka::TopicPartitionList->new(); $tp_list->add("my_topic", 0); $tp_list->add("my_topic", 1); $consumer->assign($tp_list); Atomic assignment of partitions to consume. The main entry-point for "simple" consumer mode - partitions are assigned manually. - poll() my $message = $consumer->poll($timeout_ms); Poll the consumer for messages or events. Returns instance of `Net::Kafka::Message`. Will block for at most `timeout_ms` milliseconds. An application should make sure to call `poll` at regular intervals. - committed() my $tp_list = Net::Kafka::TopicPartitionList->new(); $tp_list->add("my_topic", 0); $consumer->committed($tp_list); my $offset = $tp_list->offset("my_topic_, 0); Retrieve committed offsets for topics+partitions. - offsets\_for\_times() my $tp_list = Net::Kafka::TopicPartitionList->new(); $tp_list->add("my_topic", 0); $tp_list->set_offset("my_topic", 0, 958349923); # timestamp if passed through offset field $consumer->offsets_for_times($tp_list); my $offset = $tp_list->offset("my_topic"); Look up the offsets for the given partitions by timestamp. - pause() my $tp_list = Net::Kafka::TopicPartitionList->new(); $tp_list->add("my_topic", 0); $consumer->pause($tp_list); # pauses consumption of partition 0 of "my_topic" Pause consumption for the provided list of partitions. - resume() my $tp_list = Net::Kafka::TopicPartitionList->new(); $tp_list->add("my_topic", 0); $consumer->resume($tp_list); # resumes consumption of partition 0 of "my_topic" Resume consumption for the provided list of partitions. - subscription() my $topics = $consumer->subscription(); Returns the current topic subscription - partitions\_for() my $partitions = $producer->partitions_for("my_topic"); Returns an `ARRAYREF` that contains partition metadata information about the given topic (leader, replicas, ISR replicas); - commit() $consumer->commit(); # commit current partition assignment (blocking call) $consumer->commit(1); # commit current partition assignment (non-blocking call) my $tp_list = Net::Kafka::TopicPartitionList->new(); $tp_list->add("my_topic", 0); $tp_list->set_offset("my_topic", 0, 12345); $consumer->commit(0, $tp_list); # commit $tp_list assignment (blocking call); Commit offsets on broker for the provided list of partitions. If no partitions provided current assignment is committed instead. - commit\_message(); my $message = $consumer->poll(1000); $consumer->commit_message(0, $message); # commit message (blocking call); $consumer->commit_message(1, $message); # commit message (non-blocking call); Commit message's offset on broker for the message's partition. - position() my $position_list = Net::Kafka::TopicPartitionList->new(); $position_list->add("my_topic", 0); $consumer->position($position_list); my $position = $position_list->offset("my_topic", 0); Retrieve current positions (offsets) for topics+partitions. The \\p offset field of each requested partition will be set to the offset of the last consumed message + 1, or RD\_KAFKA\_OFFSET\_INVALID in case there was no previous message. Note: in this context the last consumed message is the offset consumed by the current librdkafka instance and, in case of rebalancing, not necessarily the last message fetched from the partition. - seek() $consumer->seek("my_topic", 0, 12345); # seek partition 0 of "my_topic" to offset "12345" $consumer->seek("my_topic", 0, RD_KAFKA_OFFSET_BEGINNING); # seek to the beginning of "my_topic" partition 0 $consumer->seek("my_topic", 0, RD_KAFKA_OFFSET_END); # seek to the end of "my_topic" partition 0 Seek consumer for topic+partition to offset which is either an absolute or logical offset. - query\_watermark\_offsets() my ($low, $high) = $consumer->query_watermark_offsets("my_topic", 0); Queries Kafka Broker for lowest and highest watermark offsets in the given topic-partition. - close() $consumer->close(); Close all consumer handles. Make sure to call it before destroying your application to make sure that all outstanding requests to be flushed. # Net::Kafka::Message This class maps to `rd_kafka_message_t` structure from librdkafka and represents message or event. Objects of this class have the following methods: - err() return error code from the message - topic() return topic name - partition() return partition number - offset() return offset. Note, that the value is truncated to 32 bit if your perl doesn't support 64 bit integers. - key() return message key - payload() return message payload - headers() return a copy of message headers - detach\_headers() return message headers and removes them from the message # Net::Kafka::Headers This class contains a list of Kafka headers (it allows duplicates). Objects of this class have the following methods: - new() create a new instance - add(name, value) append a new name/value pair to the header list - remove(name) remove all headers with the given name, if any - get\_last(name) return the last value associated with a given name - to\_hash() return an hash-of-arrays containing all headers # Net::Kafka::Err This class provides static methods to convert error codes into names and descriptions. - rd\_kafka\_get\_err\_descs() rd_kafka_get_err_descs() returns a hash mapping error codes to description strings. - to\_string() to_string($code) return the description string for this error code. - to\_name() to_name($code) return the name of this error code. # CAVEATS Message offset is truncated to 32 bit if perl is compiled without support for 64 bit integers. # SEE ALSO - [https://github.com/edenhill/librdkafka](https://github.com/edenhill/librdkafka) - [https://github.com/trinitum/perl-Kafka-Librd](https://github.com/trinitum/perl-Kafka-Librd) # LICENSE AND COPYRIGHT Copyright (C) 2016, 2017 Pavel Shaydo Copyright (C) 2018, 2019 Booking.com This program is free software; you can redistribute it and/or modify it under the terms of either: the GNU General Public License as published by the Free Software Foundation; or the Artistic License. See http://dev.perl.org/licenses/ for more information. Net-Kafka-1.06/PaxHeader/typemap000644 777777 777777 00000000277 13554325634 022100 xustar00amironovCORPAD\Domain Users000000 000000 17 gid=697783653 18 uid=2117034315 30 mtime=1571924892.696319328 30 ctime=1572341356.229394356 29 atime=1572341356.43218063 23 SCHILY.dev=16777220 26 SCHILY.ino=12902123343 18 SCHILY.nlink=3 Net-Kafka-1.06/typemap000644 €~/aK€)—Ue00000004701 13554325634 020522 0ustar00amironovCORPAD\Domain Users000000 000000 TYPEMAP plrd_kafka_t* T_RDKAFKA rd_kafka_message_t* T_RD_KAFKA_MSG rd_kafka_topic_t* T_RD_KAFKA_TOPIC rd_kafka_topic_partition_list_t* T_RD_KAFKA_TOPIC_PARTITION_LIST rd_kafka_headers_t* T_RD_KAFKA_HEADERS rd_kafka_event_t* T_RD_KAFKA_EVENT HV* T_HVREF_REFCOUNT_FIXED INPUT T_RDKAFKA if(sv_derived_from($arg, \"Net::Kafka\")) { IV tmp = SvIV((SV*)SvRV($arg)); $var = INT2PTR($type, tmp); } else { croak(\"$var is not of type Net::Kafka\"); } T_RD_KAFKA_MSG if(sv_derived_from($arg, \"Net::Kafka::Message\")) { IV tmp = SvIV((SV*)SvRV($arg)); $var = INT2PTR($type, tmp); } else { croak(\"$var is not of type Net::Kafka::Message\"); } T_RD_KAFKA_TOPIC if(sv_derived_from($arg, \"Net::Kafka::Topic\")) { IV tmp = SvIV((SV*)SvRV($arg)); $var = INT2PTR($type, tmp); } else { croak(\"$var is not of type Net::Kafka::Topic\"); } T_RD_KAFKA_TOPIC_PARTITION_LIST if(sv_derived_from($arg, \"Net::Kafka::TopicPartitionList\")) { IV tmp = SvIV((SV*)SvRV($arg)); $var = INT2PTR($type, tmp); } else { croak(\"$var is not of type Net::Kafka::TopicPartitionList\"); } T_RD_KAFKA_HEADERS if(!SvOK($arg)) { $var = NULL; } else if(sv_derived_from($arg, \"Net::Kafka::Headers\")) { IV tmp = SvIV((SV*)SvRV($arg)); $var = INT2PTR($type, tmp); } else { croak(\"$var is not of type Net::Kafka::Headers\"); } T_RD_KAFKA_EVENT if(sv_derived_from($arg, \"Net::Kafka::Event\")) { IV tmp = SvIV((SV*)SvRV($arg)); $var = INT2PTR($type, tmp); } else { croak(\"$var is not of type Net::Kafka::Event\"); } OUTPUT T_RDKAFKA sv_setref_pv($arg, \"Net::Kafka\", (void *)$var); T_RD_KAFKA_MSG sv_setref_pv($arg, \"Net::Kafka::Message\", (void *)$var); T_RD_KAFKA_TOPIC sv_setref_pv($arg, \"Net::Kafka::Topic\", (void *)$var); T_RD_KAFKA_TOPIC_PARTITION_LIST sv_setref_pv($arg, \"Net::Kafka::TopicPartitionList\", (void *)$var); T_RD_KAFKA_HEADERS sv_setref_pv($arg, \"Net::Kafka::Headers\", (void *)$var); T_RD_KAFKA_EVENT sv_setref_pv($arg, \"Net::Kafka::Event\", (void *)$var); Net-Kafka-1.06/PaxHeader/META.yml000644 777777 777777 00000000300 13556003154 021722 xustar00amironovCORPAD\Domain Users000000 000000 17 gid=697783653 18 uid=2117034315 30 mtime=1572341356.509996142 30 ctime=1572341356.512726281 30 atime=1572341356.605508603 23 SCHILY.dev=16777220 26 SCHILY.ino=12902637758 18 SCHILY.nlink=1 Net-Kafka-1.06/META.yml000644 €~/aK€)—Ue00000002421 13556003154 020356 0ustar00amironovCORPAD\Domain Users000000 000000 --- abstract: 'High-performant Perl client for Apache Kafka' author: - 'Rajesh Amradi ' - 'Michael Austin ' - 'Ankit Bhatnagar ' - 'Jaap Eldering ' - 'Osama Elsayed ' - 'Eduardo Dalla Favera ' - 'Pavel Gurkov ' - 'Przemyslaw Iskra ' - 'Alex Mironov ' - 'Pavel Shaydo ' build_requires: ExtUtils::MakeMaker: '0' configure_requires: ExtUtils::MakeMaker: '0' ExtUtils::PkgConfig: '0' dynamic_config: 1 generated_by: 'ExtUtils::MakeMaker version 7.34, CPAN::Meta::Converter version 2.150010' license: perl meta-spec: url: http://module-build.sourceforge.net/META-spec-v1.4.html version: '1.4' name: Net-Kafka no_index: directory: - t - inc requires: AnyEvent: '0' AnyEvent::XSPromises: '0' Test::More: '0' perl: '5.022000' resources: bugtracker: https://github.com/bookingcom/perl-Net-Kafka/issues homepage: https://github.com/bookingcom/perl-Net-Kafka repository: https://github.com/bookingcom/perl-Net-Kafka.git version: '1.06' x_serialization_backend: 'CPAN::Meta::YAML version 0.018' Net-Kafka-1.06/PaxHeader/scripts000755 777777 777777 00000000300 13556003154 022066 xustar00amironovCORPAD\Domain Users000000 000000 17 gid=697783653 18 uid=2117034315 30 mtime=1572341356.226578985 30 ctime=1572341356.226578985 30 atime=1572341356.893408978 23 SCHILY.dev=16777220 26 SCHILY.ino=12902637737 18 SCHILY.nlink=3 Net-Kafka-1.06/scripts/000755 €~/aK€)—Ue00000000000 13556003154 020575 5ustar00amironovCORPAD\Domain Users000000 000000 Net-Kafka-1.06/PaxHeader/lib000755 777777 777777 00000000300 13556003154 021145 xustar00amironovCORPAD\Domain Users000000 000000 17 gid=697783653 18 uid=2117034315 30 mtime=1572341356.228050764 30 ctime=1572341356.228050764 30 atime=1572341356.893422591 23 SCHILY.dev=16777220 26 SCHILY.ino=12902637743 18 SCHILY.nlink=3 Net-Kafka-1.06/lib/000755 €~/aK€)—Ue00000000000 13556003154 017654 5ustar00amironovCORPAD\Domain Users000000 000000 Net-Kafka-1.06/PaxHeader/Makefile.PL000644 777777 777777 00000000300 13555571665 022443 xustar00amironovCORPAD\Domain Users000000 000000 17 gid=697783653 18 uid=2117034315 30 mtime=1572271029.432283445 30 ctime=1572341356.224768715 30 atime=1572341356.432212401 23 SCHILY.dev=16777220 26 SCHILY.ino=12902549998 18 SCHILY.nlink=2 Net-Kafka-1.06/Makefile.PL000644 €~/aK€)—Ue00000004540 13555571665 021103 0ustar00amironovCORPAD\Domain Users000000 000000 use 5.22.0; use strict; use warnings; use Config; use ExtUtils::MakeMaker; use ExtUtils::PkgConfig; use constant LIBRDKAFKA_MIN_VERSION => "1.0.0"; # If you have a local librdkafka installation and you want to use that instead # of the system-wide, do something like: # PKG_CONFIG_PATH=/usr/local/lib/pkgconfig perl Makefile.PL # or whatever is the actual installation path my %rdkafka = ExtUtils::PkgConfig->find('rdkafka'); warn sprintf( "WARNING: Installed librdkafka version %s is lower than tested %s", $rdkafka{modversion}, LIBRDKAFKA_MIN_VERSION ) if $rdkafka{modversion} lt LIBRDKAFKA_MIN_VERSION; print "Compiling Net::Kafka with librdkafka $rdkafka{modversion}\n\n"; WriteMakefile( NAME => 'Net::Kafka', DISTNAME => 'Net-Kafka', AUTHOR => [ 'Rajesh Amradi ', 'Michael Austin ', 'Ankit Bhatnagar ', 'Jaap Eldering ', 'Osama Elsayed ', 'Eduardo Dalla Favera ', 'Pavel Gurkov ', 'Przemyslaw Iskra ', 'Alex Mironov ', 'Pavel Shaydo ', ], LICENSE => 'perl', VERSION_FROM => 'lib/Net/Kafka.pm', ABSTRACT_FROM => 'lib/Net/Kafka.pm', LIBS => [ "-lrdkafka -lpthread" ], OBJECT => '$(O_FILES)', TYPEMAPS => ['typemap'], PREREQ_PM => { 'AnyEvent' => 0, 'Test::More' => 0, 'AnyEvent::XSPromises' => 0, }, META_MERGE => { "meta-spec" => { version => 2 }, resources => { homepage => 'https://github.com/bookingcom/perl-Net-Kafka', bugtracker => { web => 'https://github.com/bookingcom/perl-Net-Kafka/issues' }, repository => { type => 'git', url => 'https://github.com/bookingcom/perl-Net-Kafka.git', web => 'https://github.com/bookingcom/perl-Net-Kafka', }, }, }, MIN_PERL_VERSION => '5.22.0', CONFIGURE_REQUIRES => { 'ExtUtils::MakeMaker' => 0, 'ExtUtils::PkgConfig' => 0, }, ); Net-Kafka-1.06/PaxHeader/Kafka.xs000644 777777 777777 00000000300 13556002700 022036 xustar00amironovCORPAD\Domain Users000000 000000 17 gid=697783653 18 uid=2117034315 30 mtime=1572341184.377055963 30 ctime=1572341356.075012822 30 atime=1572341357.267741657 23 SCHILY.dev=16777220 26 SCHILY.ino=12902636762 18 SCHILY.nlink=2 Net-Kafka-1.06/Kafka.xs000644 €~/aK€)—Ue00000066144 13556002700 020506 0ustar00amironovCORPAD\Domain Users000000 000000 /* vim: set expandtab sts=4: */ #define PERL_NO_GET_CONTEXT #include #include #include #include #include #include #include #include #ifndef TIME_UTC #ifdef CLOCK_REALTIME #define TIME_UTC CLOCK_REALTIME #else #define TIME_UTC 0 #endif #endif #include "ppport.h" #include "netkafka.h" static void make_constant_iv( pTHX_ HV *stash, const char *name, size_t namelen, IV value_iv ) { SV **sv = hv_fetch( stash, name, namelen, TRUE ); SV *sv_value = newSViv( value_iv ); if ( SvOK( *sv ) || SvTYPE( *sv ) == SVt_PVGV ) { /* For whatever reason it already exists in the stash, we need to * create the slow constsub */ newCONSTSUB( stash, name, sv_value ); } else { /* Create a read-only constant. Fast, optimised at perl compilation. */ SvUPGRADE( *sv, SVt_RV ); SvRV_set( *sv, sv_value ); SvROK_on( *sv ); SvREADONLY_on( sv_value ); } } #define MAKE_CONSTANT_IV( name ) make_constant_iv( aTHX_ stash, #name, strlen( #name ), name ) MODULE = Net::Kafka PACKAGE = Net::Kafka PREFIX = krd_ PROTOTYPES: DISABLE BOOT: { dTHX; HV *stash = get_hv( "Net::Kafka::", GV_ADD ); MAKE_CONSTANT_IV( RD_KAFKA_VERSION ); MAKE_CONSTANT_IV( RD_KAFKA_PRODUCER ); MAKE_CONSTANT_IV( RD_KAFKA_CONSUMER ); MAKE_CONSTANT_IV( RD_KAFKA_TIMESTAMP_NOT_AVAILABLE ); MAKE_CONSTANT_IV( RD_KAFKA_TIMESTAMP_CREATE_TIME ); MAKE_CONSTANT_IV( RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME ); MAKE_CONSTANT_IV( RD_KAFKA_PARTITION_UA ); MAKE_CONSTANT_IV( RD_KAFKA_OFFSET_BEGINNING ); MAKE_CONSTANT_IV( RD_KAFKA_OFFSET_END ); MAKE_CONSTANT_IV( RD_KAFKA_OFFSET_STORED ); MAKE_CONSTANT_IV( RD_KAFKA_OFFSET_INVALID ); MAKE_CONSTANT_IV( RD_KAFKA_EVENT_NONE ); MAKE_CONSTANT_IV( RD_KAFKA_EVENT_DR ); MAKE_CONSTANT_IV( RD_KAFKA_EVENT_FETCH ); MAKE_CONSTANT_IV( RD_KAFKA_EVENT_LOG ); MAKE_CONSTANT_IV( RD_KAFKA_EVENT_ERROR ); MAKE_CONSTANT_IV( RD_KAFKA_EVENT_REBALANCE ); MAKE_CONSTANT_IV( RD_KAFKA_EVENT_OFFSET_COMMIT ); MAKE_CONSTANT_IV( RD_KAFKA_EVENT_STATS ); ++PL_sub_generation; } const char * krd_rd_kafka_version() CODE: RETVAL = rd_kafka_version_str(); OUTPUT: RETVAL void krd_new(package, type, params = NULL) char *package int type HV* params PREINIT: plrd_kafka_t *krd; rd_kafka_conf_t* conf; rd_kafka_t* rk; char errstr[ERRSTR_SIZE]; PPCODE: Newxz(krd, 1, plrd_kafka_t); conf = krd_parse_config(aTHX_ krd, params); if (type == RD_KAFKA_PRODUCER) { DEBUGF(krd->debug_xs, "Creating producer"); prd_init(krd, conf); } else { DEBUGF(krd->debug_xs, "Creating consumer"); cns_init(krd, conf); } rk = rd_kafka_new(type, conf, errstr, ERRSTR_SIZE); if (rk == NULL) { croak("%s", errstr); } krd->rk = rk; krd->thx = (IV) PERL_GET_THX; krd->type = type; krd->is_closed = 0; ST(0) = sv_newmortal(); sv_setref_pv(ST(0), "Net::Kafka", (void *)krd); krd->self = newSVsv((SV*)ST(0)); if (type == RD_KAFKA_PRODUCER) { prd_start(krd); } else { cns_start(krd); } XSRETURN(1); const char * krd_get_debug_contexts() CODE: RETVAL = rd_kafka_get_debug_contexts(); OUTPUT: RETVAL void krd_subscribe(rdk, topics) plrd_kafka_t* rdk AV* topics PREINIT: STRLEN strl; int i, len; rd_kafka_topic_partition_list_t* topic_list; rd_kafka_resp_err_t err; char* topic; SV** topic_sv; CODE: len = av_len(topics) + 1; topic_list = rd_kafka_topic_partition_list_new(len); for (i=0; i < len; i++) { topic_sv = av_fetch(topics, i, 0); if (topic_sv != NULL) { topic = SvPV(*topic_sv, strl); rd_kafka_topic_partition_list_add(topic_list, topic, -1); } } err = rd_kafka_subscribe(rdk->rk, topic_list); rd_kafka_topic_partition_list_destroy(topic_list); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { croak("Error subscribing to topics: %s", rd_kafka_err2str(err)); } void krd_unsubscribe(rdk) plrd_kafka_t* rdk PREINIT: rd_kafka_resp_err_t err; CODE: err = rd_kafka_unsubscribe(rdk->rk); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { croak("Error unsubscribing from topics: %s", rd_kafka_err2str(err)); } rd_kafka_topic_partition_list_t * krd_subscription(rdk) plrd_kafka_t* rdk PREINIT: rd_kafka_topic_partition_list_t* tp_list; rd_kafka_resp_err_t err; CODE: err = rd_kafka_subscription(rdk->rk, &tp_list); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { croak("Error retrieving subscriptions: %s", rd_kafka_err2str(err)); } RETVAL = tp_list; OUTPUT: RETVAL void krd_assign(rdk, tp_list = NULL) plrd_kafka_t* rdk rd_kafka_topic_partition_list_t* tp_list PREINIT: rd_kafka_resp_err_t err; CODE: err = rd_kafka_assign(rdk->rk, tp_list); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { croak("Error assigning partitions: %s", rd_kafka_err2str(err)); } void krd_position(rdk, tp_list) plrd_kafka_t* rdk rd_kafka_topic_partition_list_t* tp_list PREINIT: rd_kafka_resp_err_t err; CODE: err = rd_kafka_position(rdk->rk, tp_list); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { croak("Error getting position: %s", rd_kafka_err2str(err)); } rd_kafka_topic_partition_list_t * krd_assignment(rdk) plrd_kafka_t *rdk PREINIT: rd_kafka_topic_partition_list_t* tp_list; rd_kafka_resp_err_t err; CODE: err = rd_kafka_assignment(rdk->rk, &tp_list); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { croak("Error retrieving assignments: %s", rd_kafka_err2str(err)); } RETVAL = tp_list; OUTPUT: RETVAL rd_kafka_event_t* krd_queue_poll(rdk, timeout_ms = 0) plrd_kafka_t *rdk int timeout_ms PREINIT: rd_kafka_event_t* rke; PPCODE: rke = rd_kafka_queue_poll(rdk->queue, timeout_ms); if (! rke) { XSRETURN_EMPTY; return; } ST(0) = sv_newmortal(); sv_setref_pv( ST(0), "Net::Kafka::Event", (void *)rke ); XSRETURN(1); long krd_queue_length(rdk) plrd_kafka_t *rdk CODE: RETVAL = rd_kafka_queue_length(rdk->queue); OUTPUT: RETVAL void krd_commit(rdk, async = 0, tp_list = NULL) plrd_kafka_t* rdk int async rd_kafka_topic_partition_list_t* tp_list PREINIT: rd_kafka_resp_err_t err; CODE: err = rd_kafka_commit(rdk->rk, tp_list, async); if (err != RD_KAFKA_RESP_ERR_NO_ERROR && err != RD_KAFKA_RESP_ERR__NO_OFFSET) { croak("Error committing offsets: %s", rd_kafka_err2str(err)); } void krd_commit_message(rdk, async = 0, rd_msg) plrd_kafka_t *rdk int async rd_kafka_message_t *rd_msg PREINIT: rd_kafka_resp_err_t err; CODE: err = rd_kafka_commit_message(rdk->rk, rd_msg, async); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { croak("Error committing message: %s", rd_kafka_err2str(err)); } void krd_committed(rdk, tp_list, timeout_ms) plrd_kafka_t* rdk rd_kafka_topic_partition_list_t* tp_list int timeout_ms PREINIT: rd_kafka_resp_err_t err; CODE: err = rd_kafka_committed(rdk->rk, tp_list, timeout_ms); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { croak("Error retrieving commited offsets: %s", rd_kafka_err2str(err)); } void krd_offsets_for_times(rdk, tp_list, timeout_ms) plrd_kafka_t* rdk rd_kafka_topic_partition_list_t* tp_list int timeout_ms PREINIT: rd_kafka_resp_err_t err; CODE: err = rd_kafka_offsets_for_times(rdk->rk, tp_list, timeout_ms); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { croak("Error retrieving offsets for times: %s", rd_kafka_err2str(err)); } void krd_pause(rdk, tp_list = NULL) plrd_kafka_t *rdk rd_kafka_topic_partition_list_t* tp_list PREINIT: rd_kafka_resp_err_t err; CODE: err = rd_kafka_pause_partitions(rdk->rk, tp_list); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { croak("Error pausing partitions: %s", rd_kafka_err2str(err)); } void krd_produce(rdk, topic, partition, key, payload, timestamp, msg_id, msgflags = 0, hdrs = NULL) plrd_kafka_t *rdk char *topic int partition SV *key SV *payload long timestamp IV msg_id int msgflags rd_kafka_headers_t *hdrs PREINIT: STRLEN plen = 0, klen = 0; char *plptr = NULL, *keyptr = NULL; rd_kafka_resp_err_t err; CODE: if (SvOK(payload)) plptr = SvPVbyte(payload, plen); if (SvOK(key)) keyptr = SvPVbyte(key, klen); err = rd_kafka_producev( rdk->rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(partition), RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY | msgflags), RD_KAFKA_V_KEY(keyptr, klen), RD_KAFKA_V_TIMESTAMP(timestamp), RD_KAFKA_V_VALUE(plptr, plen), RD_KAFKA_V_OPAQUE((void *) msg_id), /* making a copy here avoids ownership nightmares */ RD_KAFKA_V_HEADERS(hdrs ? rd_kafka_headers_copy(hdrs) : NULL), RD_KAFKA_V_END); if (err) { croak("Error producing: %s", rd_kafka_err2str(err)); } void krd_resume(rdk, tp_list = NULL) plrd_kafka_t *rdk rd_kafka_topic_partition_list_t* tp_list PREINIT: rd_kafka_resp_err_t err; CODE: err = rd_kafka_resume_partitions(rdk->rk, tp_list); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { croak("Error resuming partitions: %s", rd_kafka_err2str(err)); } void krd_consumer_poll(rdk, timeout_ms = 0) plrd_kafka_t *rdk int timeout_ms PPCODE: rd_kafka_message_t *rd_msg = rd_kafka_consumer_poll( rdk->rk, timeout_ms ); if (! rd_msg) { XSRETURN_EMPTY; return; } ST(0) = sv_newmortal(); sv_setref_pv( ST(0), "Net::Kafka::Message", (void *)rd_msg ); XSRETURN(1); void krd_topic(rdk, topic) plrd_kafka_t* rdk char *topic PPCODE: rd_kafka_topic_t* rd_topic = rd_kafka_topic_new(rdk->rk, topic, NULL); DEBUG2F(rdk->debug_xs, "Created Net::Kafka::Topic %s", rd_kafka_topic_name(rd_topic)); ST(0) = sv_newmortal(); sv_setref_pv( ST(0), "Net::Kafka::Topic", (void *)rd_topic ); XSRETURN(1); void krd_close(rdk) plrd_kafka_t* rdk CODE: krd_close_handles(rdk); void krd_DESTROY(rdk) plrd_kafka_t* rdk CODE: krd_close_handles(rdk); if (rdk->thx == (IV)PERL_GET_THX) Safefree(rdk); void krd_dump(rdk) plrd_kafka_t* rdk CODE: rd_kafka_dump(stdout, rdk->rk); void krd_query_watermark_offsets(rdk, topic, partition, timeout_ms) plrd_kafka_t* rdk char *topic int partition long timeout_ms PREINIT: rd_kafka_resp_err_t err; long low, high; PPCODE: err = rd_kafka_query_watermark_offsets(rdk->rk, topic, partition, &low, &high, timeout_ms); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { croak("Error querying watermark offsets: %s", rd_kafka_err2str(err)); } EXTEND(SP, 2); PUSHs(sv_2mortal(newSViv(low))); PUSHs(sv_2mortal(newSViv(high))); MODULE = Net::Kafka PACKAGE = Net::Kafka::Event PREFIX = krdev_ PROTOTYPES: DISABLE const char * krdev_event_name(rkev) rd_kafka_event_t* rkev CODE: RETVAL = rd_kafka_event_name(rkev); OUTPUT: RETVAL int krdev_event_error(rkev) rd_kafka_event_t* rkev CODE: RETVAL = rd_kafka_event_error(rkev); OUTPUT: RETVAL const char * krdev_event_error_string(rkev) rd_kafka_event_t* rkev CODE: RETVAL = rd_kafka_event_error_string(rkev); OUTPUT: RETVAL const char* krdev_event_stats(rkev) rd_kafka_event_t* rkev CODE: RETVAL = rd_kafka_event_stats(rkev); OUTPUT: RETVAL int krdev_event_message_count(rkev) rd_kafka_event_t* rkev CODE: RETVAL = rd_kafka_event_message_count(rkev); OUTPUT: RETVAL HV* krdev_event_delivery_report_next(rkev) rd_kafka_event_t* rkev PREINIT: const rd_kafka_message_t* rkm; CODE: rkm = rd_kafka_event_message_next(rkev); if (! rkm) { XSRETURN_UNDEF; return; } RETVAL = newHV(); hv_stores(RETVAL, "offset", newSViv(rkm->offset)); hv_stores(RETVAL, "partition", newSViv(rkm->partition)); hv_stores(RETVAL, "timestamp", newSViv(rd_kafka_message_timestamp(rkm, NULL))); hv_stores(RETVAL, "msg_id", newSViv((IV)rkm->_private)); if (rkm->err != RD_KAFKA_RESP_ERR_NO_ERROR) { char* err_msg = (char *)rd_kafka_err2str(rkm->err); hv_stores(RETVAL, "err", newSViv(rkm->err)); hv_stores(RETVAL, "err_msg", newSVpvn(err_msg, strlen(err_msg))); } OUTPUT: RETVAL int krdev_event_type(rkev) rd_kafka_event_t* rkev CODE: RETVAL = rd_kafka_event_type(rkev); OUTPUT: RETVAL void krdev_DESTROY(rkev) rd_kafka_event_t* rkev CODE: rd_kafka_event_destroy(rkev); MODULE = Net::Kafka PACKAGE = Net::Kafka::Topic PREFIX = krdt_ PROTOTYPES: DISABLE HV* krdt_metadata(rkt, timeout_ms = 0) rd_kafka_topic_t* rkt int timeout_ms PREINIT: plrd_kafka_t* rdk; const rd_kafka_metadata_t *metadatap; rd_kafka_resp_err_t err; int t, p, b, r,i; rd_kafka_metadata_topic_t topic_md; rd_kafka_metadata_partition_t partition_md; rd_kafka_metadata_broker_t broker_md; CODE: rdk = (plrd_kafka_t *)rd_kafka_topic_opaque(rkt); err = rd_kafka_metadata(rdk->rk, 0, rkt, &metadatap, timeout_ms); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { croak("Error retrieving partition information: %s", rd_kafka_err2str(err)); } RETVAL = newHV(); hv_stores(RETVAL, "orig_broker_name", newSVpv(metadatap->orig_broker_name, strlen(metadatap->orig_broker_name))); hv_stores(RETVAL, "orig_broker_id", newSViv(metadatap->orig_broker_id)); /* array of hashrefs containing topic information */ AV* topic_AV = newAV(); for (t = 0; t < metadatap->topic_cnt; t++) { /* hash for each topic information */ HV * topic_HV = newHV(); topic_md = metadatap->topics[t]; hv_stores(topic_HV, "topic_name", newSVpv(topic_md.topic, strlen(topic_md.topic))); /* array of hashrefs containing partition information within each topic */ AV * partition_AV = newAV(); for (p = 0; p < topic_md.partition_cnt; p++) { /* hash for each partition */ HV * partition_HV = newHV(); partition_md = topic_md.partitions[p]; hv_stores(partition_HV, "id", newSViv(partition_md.id)); hv_stores(partition_HV, "leader", newSViv(partition_md.leader)); /* array containing replica broker ids for each partition */ AV * replica_AV = newAV(); for (r = 0; r < partition_md.replica_cnt; r++) { av_push(replica_AV, newSViv(partition_md.replicas[r])); } hv_stores(partition_HV, "replicas", newRV_noinc((SV*)replica_AV)); /* array containing isr broker ids for each partition */ AV* isr_AV = newAV(); for(i = 0; i < partition_md.isr_cnt; i++) { av_push(isr_AV, newSViv(partition_md.isrs[i])); } hv_stores(partition_HV, "isrs", newRV_noinc((SV*)isr_AV)); av_push(partition_AV, newRV_noinc((SV*)partition_HV)); } hv_stores(topic_HV, "partitions", newRV_noinc((SV*)partition_AV)); av_push(topic_AV, newRV_noinc((SV*)topic_HV)); } hv_stores(RETVAL, "topics", newRV_noinc((SV*)topic_AV)); /* array of hashrefs containing broker information */ AV* broker_AV = newAV(); for(b = 0; b < metadatap->broker_cnt; b++) { /* broker hash */ HV * broker_HV = newHV(); broker_md = metadatap->brokers[b]; hv_stores(broker_HV, "id", newSViv(broker_md.id)); hv_stores(broker_HV, "host", newSVpv(broker_md.host, strlen(broker_md.host))); hv_stores(broker_HV, "port", newSViv(broker_md.port)); av_push(broker_AV, newRV_noinc((SV*)broker_HV)); } hv_stores(RETVAL, "brokers", newRV_noinc((SV*)broker_AV)); /* Free up memory used by metadata struct */ rd_kafka_metadata_destroy(metadatap); OUTPUT: RETVAL void krdt_seek(rkt, partition, offset, timeout_ms = 0) rd_kafka_topic_t* rkt int partition long offset int timeout_ms PREINIT: rd_kafka_resp_err_t err; CODE: err = rd_kafka_seek(rkt, partition, offset, timeout_ms); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { croak("Error while seeking: %s", rd_kafka_err2str(err)); } void krdt_DESTROY(rkt) rd_kafka_topic_t* rkt CODE: plrd_kafka_t* krd = (plrd_kafka_t *)rd_kafka_topic_opaque(rkt); DEBUG2F(krd->debug_xs, "Destroying Net::Kafka::Topic %s", rd_kafka_topic_name(rkt)); rd_kafka_topic_destroy(rkt); MODULE = Net::Kafka PACKAGE = Net::Kafka::Message PREFIX = krdm_ PROTOTYPES: DISABLE int krdm_err(rd_msg) rd_kafka_message_t *rd_msg CODE: RETVAL = rd_msg->err; OUTPUT: RETVAL const char * krdm_err_name(rd_msg) rd_kafka_message_t *rd_msg CODE: RETVAL = rd_kafka_err2name(rd_msg->err); OUTPUT: RETVAL int krdm_partition(rd_msg) rd_kafka_message_t *rd_msg CODE: RETVAL = rd_msg->partition; OUTPUT: RETVAL const char* krdm_topic(rd_msg) rd_kafka_message_t *rd_msg CODE: RETVAL = rd_kafka_topic_name(rd_msg->rkt); OUTPUT: RETVAL SV* krdm_payload(rd_msg) rd_kafka_message_t *rd_msg CODE: RETVAL = newSVpvn(rd_msg->payload, rd_msg->len); OUTPUT: RETVAL SV* krdm_key(rd_msg) rd_kafka_message_t *rd_msg CODE: if (rd_msg->err == 0) { RETVAL = newSVpvn(rd_msg->key, rd_msg->key_len); } else { RETVAL = &PL_sv_undef; } OUTPUT: RETVAL void krdm_timestamp(rd_msg) rd_kafka_message_t *rd_msg PREINIT: rd_kafka_timestamp_type_t tstype; PPCODE: long timestamp = rd_kafka_message_timestamp(rd_msg, &tstype); EXTEND(SP, 2); PUSHs(sv_2mortal(newSViv(timestamp))); PUSHs(sv_2mortal(newSViv(tstype))); long krdm_offset(rd_msg) rd_kafka_message_t *rd_msg CODE: /* that will truncate offset if perl doesn't support 64bit ints */ RETVAL = rd_msg->offset; OUTPUT: RETVAL rd_kafka_headers_t* krdm_headers(rd_msg) rd_kafka_message_t* rd_msg PREINIT: rd_kafka_headers_t *hdrs; rd_kafka_resp_err_t err; CODE: err = rd_kafka_message_headers(rd_msg, &hdrs); if (err == RD_KAFKA_RESP_ERR_NO_ERROR) { /* making a copy here avoids ownership nightmares */ RETVAL = rd_kafka_headers_copy(hdrs); } else if (err == RD_KAFKA_RESP_ERR__NOENT) { XSRETURN_UNDEF; } else { croak("Error while getting headers: %s", rd_kafka_err2str(err)); } OUTPUT: RETVAL rd_kafka_headers_t* krdm_detach_headers(rd_msg) rd_kafka_message_t* rd_msg PREINIT: rd_kafka_headers_t *hdrs; rd_kafka_resp_err_t err; CODE: err = rd_kafka_message_detach_headers(rd_msg, &hdrs); if (err == RD_KAFKA_RESP_ERR_NO_ERROR) { RETVAL = hdrs; } else if (err == RD_KAFKA_RESP_ERR__NOENT) { XSRETURN_UNDEF; } else { croak("Error while getting headers: %s", rd_kafka_err2str(err)); } OUTPUT: RETVAL void krdm_DESTROY(rd_msg) rd_kafka_message_t *rd_msg CODE: rd_kafka_message_destroy( rd_msg ); MODULE = Net::Kafka PACKAGE = Net::Kafka::Headers PREFIX = krdh_ PROTOTYPES: DISABLE rd_kafka_headers_t* krdh_new(klass) SV *klass CODE: RETVAL = rd_kafka_headers_new(0); OUTPUT: RETVAL void krdh_add(hdrs, name, value) PREINIT: STRLEN name_len, value_len; rd_kafka_resp_err_t err; INPUT: rd_kafka_headers_t* hdrs const char *name = SvPV($arg, name_len); const char *value = SvPV($arg, value_len); CODE: err = rd_kafka_header_add(hdrs, name, name_len, value, value_len); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { croak("Error while adding header: %s", rd_kafka_err2str(err)); } void krdh_remove(hdrs, name) PREINIT: rd_kafka_resp_err_t err; INPUT: rd_kafka_headers_t* hdrs const char *name CODE: err = rd_kafka_header_remove(hdrs, name); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { croak("Error while removing header: %s", rd_kafka_err2str(err)); } void krdh_get_last(hdrs, name) PREINIT: rd_kafka_resp_err_t err; const void *value; size_t value_len; INPUT: rd_kafka_headers_t* hdrs const char *name PPCODE: err = rd_kafka_header_get_last(hdrs, name, &value, &value_len); if (err == RD_KAFKA_RESP_ERR_NO_ERROR) { PUSHs(sv_2mortal(newSVpvn(value, value_len))); } else if (err == RD_KAFKA_RESP_ERR__NOENT) { XSRETURN_UNDEF; } else { croak("Error while getting header: %s", rd_kafka_err2str(err)); } HV* krdh_to_hash(hdrs) rd_kafka_headers_t* hdrs PREINIT: rd_kafka_resp_err_t err; int i; const char *name; const void *value; size_t value_len; SV **slot; AV *value_list; CODE: RETVAL = newHV(); for (i = 0; ; ++i) { err = rd_kafka_header_get_all(hdrs, i, &name, &value, &value_len); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { break; } slot = hv_fetch(RETVAL, name, strlen(name), 1); if (slot == NULL) { /* should never happen */ croak("Error while building hash, lvalue fetch returned a NULL value"); } if (!SvOK(*slot)) { value_list = newAV(); SvUPGRADE(*slot, SVt_RV); SvROK_on(*slot); SvRV_set(*slot, (SV *) value_list); } else { value_list = (AV *) SvRV(*slot); } av_push(value_list, newSVpvn(value, value_len)); } OUTPUT: RETVAL void krdh_DESTROY(hdrs) rd_kafka_headers_t* hdrs CODE: rd_kafka_headers_destroy(hdrs); MODULE = Net::Kafka PACKAGE = Net::Kafka::Error PREFIX = krde_ PROTOTYPES: DISABLE HV * krde_rd_kafka_get_err_descs() PREINIT: const struct rd_kafka_err_desc* descs; size_t cnt; int i; CODE: rd_kafka_get_err_descs(&descs, &cnt); RETVAL = newHV(); for (i = 0; i < cnt; i++) { if (descs[i].name != NULL) { hv_store(RETVAL, descs[i].name, strnlen(descs[i].name, 1024), newSViv(descs[i].code), 0); } } OUTPUT: RETVAL const char* krde_to_string(code) int code CODE: RETVAL = rd_kafka_err2str(code); OUTPUT: RETVAL const char* krde_to_name(code) int code CODE: RETVAL = rd_kafka_err2name(code); OUTPUT: RETVAL int krde_last_error() CODE: RETVAL = rd_kafka_last_error(); OUTPUT: RETVAL MODULE = Net::Kafka PACKAGE = Net::Kafka::TopicPartitionList PREFIX = ktpl_ PROTOTYPES: DISABLE rd_kafka_topic_partition_list_t * ktpl_new(class, initial_size = 10) char *class int initial_size CODE: RETVAL = rd_kafka_topic_partition_list_new(initial_size); OUTPUT: RETVAL void ktpl_add(rktpl, topic, partition) rd_kafka_topic_partition_list_t *rktpl char *topic int partition PREINIT: rd_kafka_topic_partition_t *tp; CODE: tp = rd_kafka_topic_partition_list_find(rktpl, topic, partition); if (tp == NULL) { rd_kafka_topic_partition_list_add(rktpl, topic, partition); } rd_kafka_topic_partition_list_t* ktpl_copy(rktpl) rd_kafka_topic_partition_list_t* rktpl CODE: RETVAL = rd_kafka_topic_partition_list_copy(rktpl); OUTPUT: RETVAL void ktpl_get(rktpl, idx) rd_kafka_topic_partition_list_t *rktpl int idx PPCODE: if (!rktpl || idx < 0 || idx >= rktpl->cnt) { return; } char* tn = rktpl->elems[idx].topic; EXTEND(SP, 3); PUSHs(sv_2mortal(newSVpv(tn, strlen(tn)))); PUSHs(sv_2mortal(newSViv(rktpl->elems[idx].partition))); PUSHs(sv_2mortal(newSViv(rktpl->elems[idx].offset))); void ktpl_set_offset(rktpl, topic, partition, offset) rd_kafka_topic_partition_list_t *rktpl char *topic int partition long offset PREINIT: rd_kafka_resp_err_t err; CODE: err = rd_kafka_topic_partition_list_set_offset(rktpl, topic, partition, offset); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { croak("Error setting offset: %s", rd_kafka_err2str(err)); } void ktpl_offset(rktpl, topic, partition) rd_kafka_topic_partition_list_t *rktpl char *topic int partition PREINIT: rd_kafka_topic_partition_t *tp; PPCODE: tp = rd_kafka_topic_partition_list_find(rktpl, topic, partition); if (tp == NULL) { XSRETURN_EMPTY; return; } ST(0) = sv_2mortal(newSViv(tp->offset)); XSRETURN(1); int ktpl_del(rktpl, topic, partition) rd_kafka_topic_partition_list_t *rktpl char *topic int partition CODE: RETVAL = rd_kafka_topic_partition_list_del(rktpl, topic, partition); OUTPUT: RETVAL int ktpl_size(rktpl) rd_kafka_topic_partition_list_t *rktpl CODE: RETVAL = (rktpl == NULL ? 0 : rktpl->cnt); OUTPUT: RETVAL void ktpl_DESTROY(rktpl) rd_kafka_topic_partition_list_t *rktpl CODE: rd_kafka_topic_partition_list_destroy(rktpl); Net-Kafka-1.06/PaxHeader/.travis.yml000644 777777 777777 00000000357 13554325704 022604 xustar00amironovCORPAD\Domain Users000000 000000 17 gid=697783653 18 uid=2117034315 30 mtime=1571924932.928728101 29 ctime=1572341356.22434428 30 atime=1572341356.432197186 48 LIBARCHIVE.creationtime=1571924920.527072675 23 SCHILY.dev=16777220 26 SCHILY.ino=12902123463 18 SCHILY.nlink=3 Net-Kafka-1.06/.travis.yml000644 €~/aK€)—Ue00000002371 13554325704 021230 0ustar00amironovCORPAD\Domain Users000000 000000 language: "perl" perl: - "5.22" - "5.26" - "5.28" - "5.30" sudo: required env: - KAFKA_VERSION=2.3.0 LIBRDKAFKA_VERSION=1.1.0~1confluent5.3.1-1 KAFKA_BOOTSTRAP_SERVERS=localhost:9092 KAFKA_TEST_TOPIC=test KAFKA_TEST_TOPIC_PARTITIONS=8 KAFKA_CONSUMER_GROUP_PREFIX=net-kafka before_install: - wget -qO - https://packages.confluent.io/deb/5.3/archive.key | sudo apt-key add - - sudo add-apt-repository "deb [arch=amd64] https://packages.confluent.io/deb/5.3 stable main" - sudo apt-get update -q - sudo apt-get install librdkafka-dev=$LIBRDKAFKA_VERSION - mkdir kafka && cd kafka - curl "https://www.apache.org/dist/kafka/$KAFKA_VERSION/kafka_2.12-$KAFKA_VERSION.tgz" -o kafka.tgz - tar -xvzf kafka.tgz --strip 1 - echo -e "\nauto.create.topics.enable=false" >> config/server.properties - bin/zookeeper-server-start.sh -daemon config/zookeeper.properties - bin/kafka-server-start.sh -daemon config/server.properties - bin/kafka-topics.sh --create --bootstrap-server $KAFKA_BOOTSTRAP_SERVERS --replication-factor 1 --partitions $KAFKA_TEST_TOPIC_PARTITIONS --topic $KAFKA_TEST_TOPIC - bin/kafka-topics.sh --list --bootstrap-server $KAFKA_BOOTSTRAP_SERVERS - cd ../ - cpanm ExtUtils::PkgConfig install: - cpanm -v --installdeps --notest .Net-Kafka-1.06/PaxHeader/netkafka.c000644 777777 777777 00000000277 13554325634 022426 xustar00amironovCORPAD\Domain Users000000 000000 17 gid=697783653 18 uid=2117034315 30 mtime=1571924892.688538691 30 ctime=1572341356.232107988 29 atime=1572341356.43186657 23 SCHILY.dev=16777220 26 SCHILY.ino=12902123331 18 SCHILY.nlink=3 Net-Kafka-1.06/netkafka.c000644 €~/aK€)—Ue00000020524 13554325634 021051 0ustar00amironovCORPAD\Domain Users000000 000000 #include "netkafka.h" static rd_kafka_topic_partition_list_t *krd_topic_partition_list_copy_shrinked(rd_kafka_topic_partition_list_t * ktpl) { int orig_size = ktpl->size; ktpl->size = ktpl->cnt; rd_kafka_topic_partition_list_t *shrinked_ktpl = rd_kafka_topic_partition_list_copy(ktpl); ktpl->size = orig_size; return shrinked_ktpl; } static int krd_fill_topic_config(pTHX_ plrd_kafka_t * krd, rd_kafka_topic_conf_t * topic_conf, char *errstr, HV * params) { rd_kafka_conf_res_t res; HE *he; hv_iterinit(params); while ((he = hv_iternext(params)) != NULL) { STRLEN len; char *key = HePV(he, len); SV *val = HeVAL(he); char *strval = SvPV(val, len); DEBUGF(krd->debug_xs, "Setting topic config '%s' to '%s'", key, strval); res = rd_kafka_topic_conf_set(topic_conf, key, strval, errstr, ERRSTR_SIZE); if (res != RD_KAFKA_CONF_OK) return 1; } return 0; } static void cns_commit_cb(rd_kafka_t * rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t * offsets, void *opaque) { dTHX; plrd_kafka_t *krd = (plrd_kafka_t *) opaque; dSP; ENTER; SAVETMPS; PUSHMARK(SP); EXTEND(SP, 3); DEBUG2F(krd->debug_xs, "Commit callback signaling"); SV *sv_offsets = sv_newmortal(); sv_setref_pv(sv_offsets, "Net::Kafka::TopicPartitionList", (void *)krd_topic_partition_list_copy_shrinked(offsets)); PUSHs(sv_2mortal(newSVsv(krd->self))); PUSHs(sv_2mortal(newSViv(err))); PUSHs(sv_offsets); PUTBACK; call_sv(krd->commit_cb, G_DISCARD); FREETMPS; LEAVE; } static void cns_rebalance_cb(rd_kafka_t * rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t * partitions, void *opaque) { dTHX; plrd_kafka_t *krd = (plrd_kafka_t *) opaque; dSP; ENTER; SAVETMPS; PUSHMARK(SP); EXTEND(SP, 3); DEBUG2F(krd->debug_xs, "Rebalance callback signaling"); SV *err_tmp; SV *sv_partitions = sv_newmortal(); sv_setref_pv(sv_partitions, "Net::Kafka::TopicPartitionList", (void *)krd_topic_partition_list_copy_shrinked(partitions)); PUSHs(sv_2mortal(newSVsv(krd->self))); PUSHs(sv_2mortal(newSViv(err))); PUSHs(sv_partitions); PUTBACK; call_sv(krd->rebalance_cb, G_DISCARD | G_EVAL); SPAGAIN; err_tmp = ERRSV; if (SvTRUE(err_tmp)) { rd_kafka_assign(rk, NULL); croak("%s\n", SvPV_nolen(err_tmp)); } PUTBACK; FREETMPS; LEAVE; } static void cns_error_cb(rd_kafka_t * rk, int err, const char *reason, void *opaque) { dTHX; plrd_kafka_t *krd = (plrd_kafka_t *) opaque; DEBUG2F(krd->debug_xs, "Error callback signaling"); dSP; ENTER; SAVETMPS; PUSHMARK(SP); EXTEND(SP, 3); PUSHs(sv_2mortal(newSVsv(krd->self))); PUSHs(sv_2mortal(newSViv(err))); PUSHs(sv_2mortal(newSVpv(reason, strlen(reason)))); PUTBACK; call_sv(krd->error_cb, G_DISCARD); FREETMPS; LEAVE; } static int cns_stats_cb(rd_kafka_t * rk, char *json, size_t json_len, void *opaque) { dTHX; plrd_kafka_t *krd = (plrd_kafka_t *) opaque; DEBUG2F(krd->debug_xs, "Stats callback signaling"); dSP; ENTER; SAVETMPS; PUSHMARK(SP); EXTEND(SP, 3); PUSHs(sv_2mortal(newSVsv(krd->self))); PUSHs(sv_2mortal(newSVpvn(json, json_len))); PUTBACK; call_sv(krd->stats_cb, G_DISCARD); FREETMPS; LEAVE; return 0; /* libkrdafka can free the JSON */ } void cns_init(plrd_kafka_t * krd, rd_kafka_conf_t * conf) { if (krd->stats_cb != NULL) { DEBUGF(krd->debug_xs, "Setting custom consumer stats callback"); rd_kafka_conf_set_stats_cb(conf, cns_stats_cb); } if (krd->error_cb != NULL) { DEBUGF(krd->debug_xs, "Setting custom consumer error callback"); rd_kafka_conf_set_error_cb(conf, cns_error_cb); } if (krd->rebalance_cb != NULL) { DEBUGF(krd->debug_xs, "Setting custom rebalance callback"); rd_kafka_conf_set_rebalance_cb(conf, cns_rebalance_cb); } if (krd->commit_cb != NULL) { DEBUGF(krd->debug_xs, "Setting custom commit callback"); rd_kafka_conf_set_offset_commit_cb(conf, cns_commit_cb); } } void prd_init(plrd_kafka_t * krd, rd_kafka_conf_t * conf) { if (krd->queue_fd == -1) { croak("'queue_fd' is missing from params"); } if (krd->stats_cb != NULL || krd->error_cb != NULL || krd->rebalance_cb != NULL || krd->commit_cb != NULL) { croak("Net::Kafka::Producer must not pass any perl callbacks"); } DEBUGF(krd->debug_xs, "Subscribing producer to RD_KAFKA_EVENT_DR | RD_KAFKA_EVENT_ERROR | RD_KAFKA_EVENT_STATS events"); rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_DR | RD_KAFKA_EVENT_ERROR | RD_KAFKA_EVENT_STATS); } void prd_start(plrd_kafka_t * krd) { krd->queue = rd_kafka_queue_get_main(krd->rk); rd_kafka_queue_io_event_enable(krd->queue, krd->queue_fd, "1", 1); DEBUGF(krd->debug_xs, "Created IO event queue with fd %d", krd->queue_fd); } void cns_start(plrd_kafka_t * krd) { // redirect rd_kafka_poll to consumer_poll() rd_kafka_poll_set_consumer(krd->rk); } void krd_close_handles(plrd_kafka_t * krd) { if (krd->is_closed) { return; } rd_kafka_t *rk = krd->rk; if (krd->type == RD_KAFKA_PRODUCER) { DEBUGF(krd->debug_xs, "Closing producer..."); prd_stop(krd); DEBUGF(krd->debug_xs, "Closed producer."); } else { DEBUGF(krd->debug_xs, "Closing consumer..."); cns_stop(krd); DEBUGF(krd->debug_xs, "Closed consumer."); } DEBUGF(krd->debug_xs, "Closing rk handle..."); rd_kafka_destroy(rk); DEBUGF(krd->debug_xs, "Closed rk handle."); krd->is_closed = 1; } void prd_stop(plrd_kafka_t * krd) { DEBUGF(krd->debug_xs, "Closing IO event queue..."); rd_kafka_queue_destroy(krd->queue); DEBUGF(krd->debug_xs, "Closed IO event queue."); } void cns_stop(plrd_kafka_t * krd) { rd_kafka_consumer_close(krd->rk); } rd_kafka_conf_t *krd_parse_config(pTHX_ plrd_kafka_t * krd, HV * params) { char errstr[ERRSTR_SIZE]; rd_kafka_conf_t *conf = rd_kafka_conf_new(); rd_kafka_topic_conf_t *topic_conf = rd_kafka_topic_conf_new(); rd_kafka_conf_res_t res; HE *he; krd->debug_xs = 0; krd->queue_fd = -1; if (params) { hv_iterinit(params); SV *debug_xs = hv_delete(params, "debug.xs", strlen("debug.xs"), 0); if (debug_xs != NULL && SvOK(debug_xs)) { krd->debug_xs = SvIV(debug_xs); DEBUGF(krd->debug_xs, "XS debug enabled: %d", krd->debug_xs); } while ((he = hv_iternext(params)) != NULL) { STRLEN len; char *key = HePV(he, len); SV *val = HeVAL(he); if (strncmp(key, "rebalance_cb", len) == 0) { krd->rebalance_cb = newSVsv(val); } else if (strncmp(key, "offset_commit_cb", len) == 0) { krd->commit_cb = newSVsv(val); } else if (strncmp(key, "error_cb", len) == 0) { krd->error_cb = newSVsv(val); } else if (strncmp(key, "stats_cb", len) == 0) { krd->stats_cb = newSVsv(val); } else if (strncmp(key, "queue_fd", len) == 0) { krd->queue_fd = SvIV(val); } else if (strncmp(key, "default_topic_config", len) == 0) { if (!SvROK(val) || strncmp(sv_reftype(SvRV(val), 0), "HASH", 5) != 0) { strncpy(errstr, "default_topic_config must be a hash reference", ERRSTR_SIZE); goto CROAK; } if (krd_fill_topic_config(aTHX_ krd, topic_conf, errstr, (HV *) SvRV(val)) != 0) goto CROAK; } else { // set named configuration property char *strval = SvPV(val, len); DEBUGF(krd->debug_xs, "Setting global config '%s' to '%s'", key, strval); res = rd_kafka_conf_set(conf, key, strval, errstr, ERRSTR_SIZE); if (res != RD_KAFKA_CONF_OK) goto CROAK; } } } rd_kafka_conf_set_opaque(conf, (void *)krd); rd_kafka_topic_conf_set_opaque(topic_conf, (void *)krd); rd_kafka_conf_set_default_topic_conf(conf, topic_conf); return conf; CROAK: rd_kafka_conf_destroy(conf); rd_kafka_topic_conf_destroy(topic_conf); croak("%s", errstr); return NULL; } Net-Kafka-1.06/PaxHeader/META.json000644 777777 777777 00000000360 13556003155 022101 xustar00amironovCORPAD\Domain Users000000 000000 17 gid=697783653 18 uid=2117034315 30 mtime=1572341357.059975593 30 ctime=1572341357.063418947 30 atime=1572341357.103468154 48 LIBARCHIVE.creationtime=1572341356.549985052 23 SCHILY.dev=16777220 26 SCHILY.ino=12902637759 18 SCHILY.nlink=1 Net-Kafka-1.06/META.json000644 €~/aK€)—Ue00000003630 13556003155 020532 0ustar00amironovCORPAD\Domain Users000000 000000 { "abstract" : "High-performant Perl client for Apache Kafka", "author" : [ "Rajesh Amradi ", "Michael Austin ", "Ankit Bhatnagar ", "Jaap Eldering ", "Osama Elsayed ", "Eduardo Dalla Favera ", "Pavel Gurkov ", "Przemyslaw Iskra ", "Alex Mironov ", "Pavel Shaydo " ], "dynamic_config" : 1, "generated_by" : "ExtUtils::MakeMaker version 7.34, CPAN::Meta::Converter version 2.150010", "license" : [ "perl_5" ], "meta-spec" : { "url" : "http://search.cpan.org/perldoc?CPAN::Meta::Spec", "version" : 2 }, "name" : "Net-Kafka", "no_index" : { "directory" : [ "t", "inc" ] }, "prereqs" : { "build" : { "requires" : { "ExtUtils::MakeMaker" : "0" } }, "configure" : { "requires" : { "ExtUtils::MakeMaker" : "0", "ExtUtils::PkgConfig" : "0" } }, "runtime" : { "requires" : { "AnyEvent" : "0", "AnyEvent::XSPromises" : "0", "Test::More" : "0", "perl" : "5.022000" } } }, "release_status" : "stable", "resources" : { "bugtracker" : { "web" : "https://github.com/bookingcom/perl-Net-Kafka/issues" }, "homepage" : "https://github.com/bookingcom/perl-Net-Kafka", "repository" : { "type" : "git", "url" : "https://github.com/bookingcom/perl-Net-Kafka.git", "web" : "https://github.com/bookingcom/perl-Net-Kafka" } }, "version" : "1.06", "x_serialization_backend" : "JSON::PP version 2.97001" } Net-Kafka-1.06/lib/PaxHeader/Net000755 777777 777777 00000000300 13556003154 021673 xustar00amironovCORPAD\Domain Users000000 000000 17 gid=697783653 18 uid=2117034315 30 mtime=1572341356.232556558 30 ctime=1572341356.232556558 30 atime=1572341356.953256846 23 SCHILY.dev=16777220 26 SCHILY.ino=12902637744 18 SCHILY.nlink=4 Net-Kafka-1.06/lib/Net/000755 €~/aK€)—Ue00000000000 13556003154 020402 5ustar00amironovCORPAD\Domain Users000000 000000 Net-Kafka-1.06/lib/Net/PaxHeader/Kafka000755 777777 777777 00000000300 13556003154 022710 xustar00amironovCORPAD\Domain Users000000 000000 17 gid=697783653 18 uid=2117034315 30 mtime=1572341356.231353443 30 ctime=1572341356.231353443 30 atime=1572341356.893566156 23 SCHILY.dev=16777220 26 SCHILY.ino=12902637745 18 SCHILY.nlink=5 Net-Kafka-1.06/lib/Net/Kafka/000755 €~/aK€)—Ue00000000000 13556003154 021417 5ustar00amironovCORPAD\Domain Users000000 000000 Net-Kafka-1.06/lib/Net/PaxHeader/Kafka.pm000644 777777 777777 00000000300 13556002700 023314 xustar00amironovCORPAD\Domain Users000000 000000 17 gid=697783653 18 uid=2117034315 30 mtime=1572341184.378060254 30 ctime=1572341356.232763063 30 atime=1572341356.474201924 23 SCHILY.dev=16777220 26 SCHILY.ino=12902636763 18 SCHILY.nlink=2 Net-Kafka-1.06/lib/Net/Kafka.pm000644 €~/aK€)—Ue00000036330 13556002700 021756 0ustar00amironovCORPAD\Domain Users000000 000000 package Net::Kafka; use strict; use warnings; use constant; require Exporter; our @ISA = qw/Exporter/; our (@EXPORT_OK, %EXPORT_TAGS); BEGIN { our $VERSION = "1.06"; my $XS_VERSION = $VERSION; $VERSION = eval $VERSION; require XSLoader; XSLoader::load('Net::Kafka', $XS_VERSION); my $errors = Net::Kafka::Error::rd_kafka_get_err_descs(); no strict 'refs'; for ( keys %$errors ) { *{__PACKAGE__ . "::RD_KAFKA_RESP_ERR_$_"} = eval "sub() { $errors->{$_} }"; push @EXPORT_OK, "RD_KAFKA_RESP_ERR_$_"; } my %constants = ( RD_KAFKA_TOPIC_CONFIG_KEYS => [qw/ request.required.acks acks request.timeout.ms message.timeout.ms delivery.timeout.ms queuing.strategy produce.offset.report partitioner partitioner_cb msg_order_cmp opaque compression.codec compression.type compression.level auto.offset.reset offset.store.path offset.store.sync.interval.ms offset.store.method consume.callback.max.messages /], DEFAULT_METADATA_TIMEOUT => 1000, ); push @EXPORT_OK => keys %constants; push @EXPORT_OK => (qw/ RD_KAFKA_VERSION RD_KAFKA_PARTITION_UA RD_KAFKA_OFFSET_END RD_KAFKA_VERSION RD_KAFKA_PRODUCER RD_KAFKA_CONSUMER RD_KAFKA_TIMESTAMP_NOT_AVAILABLE RD_KAFKA_TIMESTAMP_CREATE_TIME RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME RD_KAFKA_PARTITION_UA RD_KAFKA_OFFSET_BEGINNING RD_KAFKA_OFFSET_END RD_KAFKA_OFFSET_STORED RD_KAFKA_OFFSET_INVALID RD_KAFKA_OFFSET_STORED RD_KAFKA_EVENT_NONE RD_KAFKA_EVENT_DR RD_KAFKA_EVENT_FETCH RD_KAFKA_EVENT_LOG RD_KAFKA_EVENT_ERROR RD_KAFKA_EVENT_REBALANCE RD_KAFKA_EVENT_OFFSET_COMMIT RD_KAFKA_EVENT_STATS /); %EXPORT_TAGS = ( all => [ @EXPORT_OK ], ); constant->import({ %constants }); } sub partitions_for { my ($self, $topic, $timeout_ms) = @_; $timeout_ms //= DEFAULT_METADATA_TIMEOUT; my $metadata = $self->topic($topic)->metadata($timeout_ms); my @topics = grep { $_->{topic_name} eq $topic } @{ $metadata->{topics} }; die sprintf("Unable to fetch metadata for topic %s", $topic) if scalar @topics != 1; my $topic_metadata = shift @topics; die sprintf("Unable to fetch partitions metadata for topic %s", $topic) unless @{ $topic_metadata->{partitions} }; return $topic_metadata->{partitions}; } 1; =head1 NAME =for markdown [![Build Status](https://travis-ci.com/bookingcom/perl-Net-Kafka.svg?branch=master)](https://travis-ci.com/bookingcom/perl-Net-Kafka) Net::Kafka - High-performant Perl client for Apache Kafka =head1 SYNOPSIS use Net::Kafka::Producer; use Net::Kafka::Consumer; use AnyEvent; # Produce 1 message into "my_topic" my $condvar = AnyEvent->condvar; my $producer = Net::Kafka::Producer->new( 'bootstrap.servers' => 'localhost:9092' ); $producer->produce( payload => "message", topic => "my_topic" )->then(sub { my $delivery_report = shift; $condvar->send; print "Message successfully delivered with offset " . $delivery_report->{offset}; }, sub { my $error = shift; $condvar->send; die "Unable to produce a message: " . $error->{error} . ", code: " . $error->{code}; }); $condvar->recv; # Consume message from "my_topic" my $consumer = Net::Kafka::Consumer->new( 'bootstrap.servers' => 'localhost:9092', 'group.id' => 'my_consumer_group', 'enable.auto.commit' => 'true', ); $consumer->subscribe( [ "my_topic" ] ); while (1) { my $msg = $kafka->consumer_poll(1000); if ($msg) { if ( $msg->err ) { say "Error: ", Net::Kafka::Error::to_string($err); } else { say $msg->payload; } } } =head1 DESCRIPTION This module provides Perl bindings to L C client library. It is heavily inspired by L module originally developed by Pavel Shaydo. Please refer to the following modules documentation in order to understand how to use it: =over =item * C - asynchronous producer interface =item * C - consumer interface that supports both Simple and Distributed modes =back =head1 REQUIREMENTS =over =item * GNU make =item * librdkafka >= 1.0.0 =back =head1 INSTALLATION First install librdkafka (L). =head2 BUILD FROM CPAN cpanm install Net::Kafka =head2 BUILD FROM SOURCE Sources are available on Github: L. perl Makefile.pl make make test make install =head1 Net::Kafka::Producer The Net::Kafka::Producer module provides interface to librdkafka's producer methods. It utilizes signal pipes, L watcher and L to make its behaviour asynchronous. Taking that into consideration you need to make sure to properly create condvar and C/C in order to collect all outstanding promises. It is highly suggested to familirize yourself with both L and L modules. See L for example. =head2 METHODS =over 4 =item new() my $producer = Net::Kafka::Producer->new( 'bootstrap.servers' => 'localhost:9092' ); Create an instance of Net::Kafka::Producer. Accept hash where keys are equal to property names of librdkafka (see L). Note that only C and C callbacks are supported for Producer. Message delivery reports are served automatically through C based C method (see below). =item produce() my $promise = $producer->produce( payload => "my_message", topic => "my_topic", key => "my_key", # optional timestamp => 1234567, # optional, if not specified current local timestamp will be used partition => 0 # optional, if not specified internal librdkafka partitioner will be used headers => $headers, # Optional, see Net::Kafka::Headers )->then(sub { my $delivery_report = shift; print "Message is sent with offset " . $delivery_report->{offset}; })->catch(sub { my $error = shift; print $error->{error} . "\n"; }); Sends a message to Kafka. Accepts hash with parameters. Returns back an instance of C that will be resolved/rejected later. In case message is successfully send "resolve" callback will receive a delievry report in the form of the hash that contains C, C and C. If message delivery has failed "reject" callback will receive a hash that contains C (a human readable error description) and (optionally) C that is equal to librdkafka's error code. All error codes are mapped and exported by C module as constants (e.g. C) for simplicity. =item partitions_for() my $partitions = $producer->partitions_for("my_topic", $timeout_ms); Returns an C that contains partition metadata information about the given topic (leader, replicas, ISR replicas); =item close() $producer->close(); Explicitly closees C instance and underlying librdkafka handles. =back =head1 Net::Kafka::Consumer The Net::Kafka::Consumer class provides interface to librdkafka's consumer functionality. It supports both "distributed" (subscription based) and "simple" (manual partition assignment) modes of work. =head2 METHODS =over 4 =item new() my $consumer = Net::Kafka::Consumer->new( 'bootstrap.servers' => 'localhost:9092', 'group.id' => "my_consumer_group", 'enable.auto.commit' => "true", ); Create an instance of Net::Kafka::Consumer. Accept hash where keys are equal to property names of librdkafka (see L). Note that not all callbacks are supported at the moment. Supported ones are: C, C, C and C. =item subscribe() $consumer->subscribe([ 'my_topic' ]); Subscribe to topic set using balanced consumer groups. The main entry-point for "distributed" consumer mode - partitions will be assigned automatically using Kafka's GroupApi semantics. Wildcard/regex topics are supported so matching topics will be added to the subscription list. =item unsubscribe() $consumer->unsubscribe(); Unsubscribe from the current subscription set. =item assign() # manually assign partitions 0 and 1 to be consumed my $tp_list = Net::Kafka::TopicPartitionList->new(); $tp_list->add("my_topic", 0); $tp_list->add("my_topic", 1); $consumer->assign($tp_list); Atomic assignment of partitions to consume. The main entry-point for "simple" consumer mode - partitions are assigned manually. =item poll() my $message = $consumer->poll($timeout_ms); Poll the consumer for messages or events. Returns instance of C. Will block for at most C milliseconds. An application should make sure to call C at regular intervals. =item committed() my $tp_list = Net::Kafka::TopicPartitionList->new(); $tp_list->add("my_topic", 0); $consumer->committed($tp_list); my $offset = $tp_list->offset("my_topic_, 0); Retrieve committed offsets for topics+partitions. =item offsets_for_times() my $tp_list = Net::Kafka::TopicPartitionList->new(); $tp_list->add("my_topic", 0); $tp_list->set_offset("my_topic", 0, 958349923); # timestamp if passed through offset field $consumer->offsets_for_times($tp_list); my $offset = $tp_list->offset("my_topic"); Look up the offsets for the given partitions by timestamp. =item pause() my $tp_list = Net::Kafka::TopicPartitionList->new(); $tp_list->add("my_topic", 0); $consumer->pause($tp_list); # pauses consumption of partition 0 of "my_topic" Pause consumption for the provided list of partitions. =item resume() my $tp_list = Net::Kafka::TopicPartitionList->new(); $tp_list->add("my_topic", 0); $consumer->resume($tp_list); # resumes consumption of partition 0 of "my_topic" Resume consumption for the provided list of partitions. =item subscription() my $topics = $consumer->subscription(); Returns the current topic subscription =item partitions_for() my $partitions = $producer->partitions_for("my_topic"); Returns an C that contains partition metadata information about the given topic (leader, replicas, ISR replicas); =item commit() $consumer->commit(); # commit current partition assignment (blocking call) $consumer->commit(1); # commit current partition assignment (non-blocking call) my $tp_list = Net::Kafka::TopicPartitionList->new(); $tp_list->add("my_topic", 0); $tp_list->set_offset("my_topic", 0, 12345); $consumer->commit(0, $tp_list); # commit $tp_list assignment (blocking call); Commit offsets on broker for the provided list of partitions. If no partitions provided current assignment is committed instead. =item commit_message(); my $message = $consumer->poll(1000); $consumer->commit_message(0, $message); # commit message (blocking call); $consumer->commit_message(1, $message); # commit message (non-blocking call); Commit message's offset on broker for the message's partition. =item position() my $position_list = Net::Kafka::TopicPartitionList->new(); $position_list->add("my_topic", 0); $consumer->position($position_list); my $position = $position_list->offset("my_topic", 0); Retrieve current positions (offsets) for topics+partitions. The \p offset field of each requested partition will be set to the offset of the last consumed message + 1, or RD_KAFKA_OFFSET_INVALID in case there was no previous message. Note: in this context the last consumed message is the offset consumed by the current librdkafka instance and, in case of rebalancing, not necessarily the last message fetched from the partition. =item seek() $consumer->seek("my_topic", 0, 12345); # seek partition 0 of "my_topic" to offset "12345" $consumer->seek("my_topic", 0, RD_KAFKA_OFFSET_BEGINNING); # seek to the beginning of "my_topic" partition 0 $consumer->seek("my_topic", 0, RD_KAFKA_OFFSET_END); # seek to the end of "my_topic" partition 0 Seek consumer for topic+partition to offset which is either an absolute or logical offset. =item query_watermark_offsets() my ($low, $high) = $consumer->query_watermark_offsets("my_topic", 0); Queries Kafka Broker for lowest and highest watermark offsets in the given topic-partition. =item close() $consumer->close(); Close all consumer handles. Make sure to call it before destroying your application to make sure that all outstanding requests to be flushed. =back =head1 Net::Kafka::Message This class maps to C structure from librdkafka and represents message or event. Objects of this class have the following methods: =over 4 =item err() return error code from the message =item topic() return topic name =item partition() return partition number =item offset() return offset. Note, that the value is truncated to 32 bit if your perl doesn't support 64 bit integers. =item key() return message key =item payload() return message payload =item headers() return a copy of message headers =item detach_headers() return message headers and removes them from the message =back =head1 Net::Kafka::Headers This class contains a list of Kafka headers (it allows duplicates). Objects of this class have the following methods: =over 4 =item new() create a new instance =item add(name, value) append a new name/value pair to the header list =item remove(name) remove all headers with the given name, if any =item get_last(name) return the last value associated with a given name =item to_hash() return an hash-of-arrays containing all headers =back =head1 Net::Kafka::Err This class provides static methods to convert error codes into names and descriptions. =over 4 =item rd_kafka_get_err_descs() rd_kafka_get_err_descs() returns a hash mapping error codes to description strings. =item to_string() to_string($code) return the description string for this error code. =item to_name() to_name($code) return the name of this error code. =back =head1 CAVEATS Message offset is truncated to 32 bit if perl is compiled without support for 64 bit integers. =head1 SEE ALSO =over =item * L =item * L =back =head1 LICENSE AND COPYRIGHT Copyright (C) 2016, 2017 Pavel Shaydo Copyright (C) 2018, 2019 Booking.com This program is free software; you can redistribute it and/or modify it under the terms of either: the GNU General Public License as published by the Free Software Foundation; or the Artistic License. See http://dev.perl.org/licenses/ for more information. =cut Net-Kafka-1.06/lib/Net/Kafka/PaxHeader/Util.pm000644 777777 777777 00000000300 13554325634 024245 xustar00amironovCORPAD\Domain Users000000 000000 17 gid=697783653 18 uid=2117034315 30 mtime=1571924892.680242549 30 ctime=1572341356.231016369 30 atime=1572341356.474283717 23 SCHILY.dev=16777220 26 SCHILY.ino=12902123327 18 SCHILY.nlink=3 Net-Kafka-1.06/lib/Net/Kafka/Util.pm000644 €~/aK€)—Ue00000001145 13554325634 022703 0ustar00amironovCORPAD\Domain Users000000 000000 package Net::Kafka::Util; use strict; use warnings; use Net::Kafka qw/RD_KAFKA_TOPIC_CONFIG_KEYS/; my $topic_config_key_lookup = { map { $_ => 1 } @{ +RD_KAFKA_TOPIC_CONFIG_KEYS } }; sub is_topic_config_key { my $key = shift; return exists $topic_config_key_lookup->{$key} ? 1 : 0; } sub build_config { my $args = shift; my $config = {}; foreach my $key (keys %$args) { if (is_topic_config_key($key)) { $config->{default_topic_config}{$key} = $args->{$key}; } else { $config->{$key} = $args->{$key}; } } return $config; } 1; Net-Kafka-1.06/lib/Net/Kafka/PaxHeader/Producer.pm000644 777777 777777 00000000300 13556002700 025077 xustar00amironovCORPAD\Domain Users000000 000000 17 gid=697783653 18 uid=2117034315 30 mtime=1572341184.380551467 30 ctime=1572341356.228887676 30 atime=1572341356.474309056 23 SCHILY.dev=16777220 26 SCHILY.ino=12902636765 18 SCHILY.nlink=2 Net-Kafka-1.06/lib/Net/Kafka/Producer.pm000644 €~/aK€)—Ue00000014523 13556002700 023541 0ustar00amironovCORPAD\Domain Users000000 000000 package Net::Kafka::Producer; use strict; use warnings; use POSIX; use IO::Handle; use Net::Kafka qw/ RD_KAFKA_PRODUCER RD_KAFKA_PARTITION_UA RD_KAFKA_EVENT_DR RD_KAFKA_EVENT_NONE RD_KAFKA_EVENT_STATS RD_KAFKA_EVENT_ERROR /; use Net::Kafka::Util; use AnyEvent::XSPromises qw/deferred/; use Scalar::Util qw/weaken/; use AnyEvent; use constant { DEFAULT_ACKS => 1, DEFAULT_ERROR_CB => sub { my ($self, $err, $msg) = @_; warn sprintf("WARNING: Net::Kafka::Producer: %s (%s)", $msg, $err); }, }; sub new { my ($class, %args) = @_; my $rdkafka_version = Net::Kafka::rd_kafka_version(); pipe my $r, my $w or die "could not create a signalq pipe: $!"; $r->autoflush(1); $w->autoflush(1); my $error_cb = delete $args{error_cb} // DEFAULT_ERROR_CB; my $stats_cb = delete $args{stats_cb}; my $config = Net::Kafka::Util::build_config({ 'acks' => DEFAULT_ACKS, 'queue_fd' => $w->fileno, %args }); my $kafka = delete $args{kafka} || Net::Kafka->new(RD_KAFKA_PRODUCER, $config); my $self = bless { _kafka => $kafka, _max_id => 0, _in_flight => {}, _read_queue_fd => $r, _write_queue_fd => $w, _error_cb => $error_cb, _stats_cb => $stats_cb, _watcher => undef, _is_closed => 0, }, $class; $self->_setup_signal_watcher(); $self->_process_events(); # process existing events in the queue return $self; } sub _setup_signal_watcher { my $self = shift; weaken(my $self_ = $self); $self->{_watcher} = AnyEvent->io( fh => $self_->{_read_queue_fd}, poll => "r", cb => sub { $self_->_signal_watcher_cb() } ); } sub _signal_watcher_cb { my $self = shift; return if $self->{_is_closed}; my $signals_count = sysread $self->{_read_queue_fd}, my $signal, 1; if (! defined $signals_count) { $! == EAGAIN and return; warn "sysread failed: $!"; } else { $self->_process_events(); } } sub _process_events { my $self = shift; while (my $event = $self->{_kafka}->queue_poll()) { if (! defined $event) { warn sprintf "ERROR: empty event received"; next; } my $event_type = $event->event_type(); if ($event_type == RD_KAFKA_EVENT_NONE) { # do nothing } elsif ($event_type == RD_KAFKA_EVENT_DR) { $self->_process_event_delivery_reports($event); } elsif ($event_type == RD_KAFKA_EVENT_STATS) { $self->_process_event_stats($event); } elsif ($event_type == RD_KAFKA_EVENT_ERROR) { $self->_process_event_error($event); } else { warn sprintf "ERROR: unknown event type %s received", $event_type; } } } sub _process_event_error { my ($self, $event) = @_; my $err = $event->event_error(); my $err_msg = $event->event_error_string(); $self->{_error_cb}->( $self, $err, $err_msg ) if defined $self->{_error_cb}; } sub _process_event_stats { my ($self, $event) = @_; my $stats = $event->event_stats(); $self->{_stats_cb}->( $self, $stats ) if defined $self->{_stats_cb}; } sub _process_event_delivery_reports { my ($self, $event) = @_; while (my $report = $event->event_delivery_report_next()) { my $msg_id = $report->{msg_id}; if (exists $report->{err}) { $self->_reject_deferred($msg_id, { error => $report->{err_msg}, error_code => $report->{err}, }); } else { $self->_resolve_deferred($msg_id, { offset => $report->{offset}, partition => $report->{partition}, timestamp => $report->{timestamp}, }); } } } sub produce { my ($self, %args) = @_; $self->_check_if_closed(); my $topic = $args{topic} or die 'topic name is required'; my $partition = $args{partition} // RD_KAFKA_PARTITION_UA; my $timestamp = $args{timestamp} // 0; my $key = $args{key}; my $payload = $args{payload}; my $headers = $args{headers}; my $d = deferred; my $msg_id = $self->_register_deferred($d, \%args); eval { $self->{_kafka}->produce( $topic, $partition, $key, $payload, $timestamp, $msg_id, 0, $headers, ); 1; } or do { my $err = $@ || "Zombie Error"; $self->_reject_deferred($msg_id, { error => $err, error_code => Net::Kafka::Error::last_error(), }); }; return $d->promise; } sub _register_deferred { my ($self, $deferred, $args) = @_; my $id = $self->{_max_id} + 1; $self->{_max_id} = $id; $self->{_in_flight}{$id} = { deferred => $deferred, args => $args, }; return $id; } sub _resolve_deferred { my ($self, $msg_id, $data) = @_; my $msg = delete $self->{_in_flight}{$msg_id} or die "Cannot find an in-flight deferred with id $msg_id"; my $d = $msg->{deferred}; my $args = $msg->{args}; $d->resolve({ %$args, %$data }); } sub _reject_deferred { my ($self, $msg_id, $data) = @_; my $msg = delete $self->{_in_flight}{$msg_id} or die "Cannot find an in-flight deferred with id $msg_id"; my $d = $msg->{deferred}; my $args = $msg->{args}; $d->reject({ %$args, %$data }); } sub _reject_all_deferred { my ($self, $data) = @_; my @msg_ids = keys %{ $self->{_in_flight} }; $self->_reject_deferred($_, $data) foreach @msg_ids; } sub partitions_for { my ($self, $topic, $timeout_ms) = @_; $self->_check_if_closed(); return $self->{_kafka}->partitions_for($topic, $timeout_ms); } sub _check_if_closed { my $self = shift; die "Producer is closed" if $self->{_is_closed}; } sub close { my $self = shift; return if $self->{_is_closed}; $self->{_kafka}->close() if defined $self->{_kafka}; $self->{_watcher} = undef; close $self->{_read_queue_fd}; close $self->{_write_queue_fd}; $self->{_is_closed} = 1; } sub DESTROY { my $self = shift; $self->close(); } 1; Net-Kafka-1.06/lib/Net/Kafka/PaxHeader/Consumer.pm000644 777777 777777 00000000300 13556002700 025107 xustar00amironovCORPAD\Domain Users000000 000000 17 gid=697783653 18 uid=2117034315 30 mtime=1572341184.379235266 30 ctime=1572341356.231540125 30 atime=1572341356.474296894 23 SCHILY.dev=16777220 26 SCHILY.ino=12902636764 18 SCHILY.nlink=2 Net-Kafka-1.06/lib/Net/Kafka/Consumer.pm000644 €~/aK€)—Ue00000006051 13556002700 023546 0ustar00amironovCORPAD\Domain Users000000 000000 package Net::Kafka::Consumer; use strict; use warnings; use Net::Kafka qw/RD_KAFKA_CONSUMER/; use Net::Kafka::Util; use constant { DEFAULT_SEEK_TIMEOUT => 1000, DEFAULT_FETCH_OFFSET_TIMEOUT => 5000, DEFAULT_ERROR_CB => sub { my ($self, $err, $msg) = @_; warn sprintf("WARNING: Net::Kafka::Consumer: %s (%s)", $msg, $err); }, }; sub new { my ($class, %args) = @_; my $rdkafka_version = Net::Kafka::rd_kafka_version(); my $error_cb = delete $args{error_cb} // DEFAULT_ERROR_CB; my $config = Net::Kafka::Util::build_config({ 'error_cb' => $error_cb, %args }); my $kafka = delete $args{kafka} || Net::Kafka->new(RD_KAFKA_CONSUMER, $config); return bless { _kafka => $kafka, _is_closed => 0, }, $class; } sub subscribe { my ($self, $topics) = @_; $self->{_kafka}->subscribe($topics); } sub unsubscribe { my ($self) = @_; $self->{_kafka}->unsubscribe(); } sub assign { my $self = shift; $self->{_kafka}->assign(@_); } sub poll { my $self = shift; return $self->{_kafka}->consumer_poll(@_); } sub committed { my ($self, $tp_list, $timeout_ms) = @_; $self->{_kafka}->committed($tp_list, $timeout_ms // DEFAULT_FETCH_OFFSET_TIMEOUT); } sub offsets_for_times { my ($self, $tp_list, $timeout_ms) = @_; $self->{_kafka}->offsets_for_times($tp_list, $timeout_ms // DEFAULT_FETCH_OFFSET_TIMEOUT); } sub query_watermark_offsets { my ($self, $topic, $partition, $timeout_ms) = @_; $self->{_kafka}->query_watermark_offsets($topic, $partition, $timeout_ms // DEFAULT_FETCH_OFFSET_TIMEOUT); } sub pause { my ($self, $tp_list) = @_; $self->{_kafka}->pause($tp_list); } sub position { my ($self, $tp_list) = @_; $self->{_kafka}->position($tp_list); } sub resume { my ($self, $tp_list) = @_; $self->{_kafka}->resume($tp_list); } sub subscription { my $self = shift; my $topics = []; my $subscription = $self->{_kafka}->subscription(); for ( my $i=0; $i < $subscription->size(); $i++ ) { my ($t) = $subscription->get($i); push @$topics, $t; } return $topics; } sub partitions_for { my ($self, $topic, $timeout_ms) = @_; return $self->{_kafka}->partitions_for($topic, $timeout_ms); } sub commit { my ($self, $async, $topic_partitions) = @_; $async //= 0; $topic_partitions ? $self->{_kafka}->commit($async, $topic_partitions) : $self->{_kafka}->commit($async); } sub commit_message { my ($self, $async, $msg) = @_; $async //= 0; $self->{_kafka}->commit_message($async, $msg); } sub seek { my ($self, $topic, $partition, $offset, $timeout_ms) = @_; $timeout_ms //= DEFAULT_SEEK_TIMEOUT; $self->{_kafka}->topic($topic)->seek($partition, $offset, $timeout_ms); } sub close { my $self = shift; return if $self->{_is_closed}; $self->{_kafka}->close() if defined $self->{_kafka}; $self->{_is_closed} = 1; } sub DESTROY { my $self = shift; $self->close(); } 1; Net-Kafka-1.06/scripts/PaxHeader/generate_readme.sh000755 777777 777777 00000000360 13554556726 025634 xustar00amironovCORPAD\Domain Users000000 000000 17 gid=697783653 18 uid=2117034315 30 mtime=1572003286.778454046 30 ctime=1572341356.226768929 30 atime=1572341356.473875873 48 LIBARCHIVE.creationtime=1572001121.390720054 23 SCHILY.dev=16777220 26 SCHILY.ino=12902231428 18 SCHILY.nlink=2 Net-Kafka-1.06/scripts/generate_readme.sh000755 €~/aK€)—Ue00000000246 13554556726 024265 0ustar00amironovCORPAD\Domain Users000000 000000 #!/bin/bash set -e VERSION_FROM=lib/Net/Kafka.pm perl -MPod::Markdown -e 'Pod::Markdown->new->filter(@ARGV)' $VERSION_FROM > README.md pod2readme $VERSION_FROM READMENet-Kafka-1.06/t/PaxHeader/01-producer.t000644 777777 777777 00000000300 13556002700 023136 xustar00amironovCORPAD\Domain Users000000 000000 17 gid=697783653 18 uid=2117034315 30 mtime=1572341184.381798845 30 ctime=1572341356.229938161 30 atime=1572341356.359094505 23 SCHILY.dev=16777220 26 SCHILY.ino=12902636766 18 SCHILY.nlink=2 Net-Kafka-1.06/t/01-producer.t000644 €~/aK€)—Ue00000012300 13556002700 021567 0ustar00amironovCORPAD\Domain Users000000 000000 #!perl use strict; use warnings; use File::Basename; use lib File::Basename::dirname(__FILE__).'/lib'; use Test::More; use TestProducer; use AnyEvent; use AnyEvent::XSPromises qw/collect/; use Data::Dumper qw/Dumper/; plan skip_all => "Missing Kafka test environment" unless TestProducer->is_ok; subtest 'Partitions For' => sub { my $producer = TestProducer->new(); my $partitions = $producer->partitions_for( TestProducer::topic ); isa_ok($partitions, 'ARRAY'); is (scalar(@$partitions), TestProducer::topic_partitions(), "fetch partitions info"); }; subtest 'Partitions For (missing topic)' => sub { my $producer = TestProducer->new(); eval { my $partitions = $producer->partitions_for( 'non-existing-topic-name' ); ok(0); 1; } or do { ok(1); }; }; subtest 'Producer Error Callback' => sub { my $error_cb_called = 0; my $producer = TestProducer->new( 'bootstrap.servers' => 'localhost:1234', 'message.timeout.ms' => 1000, error_cb => sub { my ($self, $err, $msg) = @_; $error_cb_called = 1; }, ); my $condvar = AnyEvent->condvar; $producer->produce( topic => TestProducer::topic, payload => "test" )->then(sub { ok(0); })->catch(sub { ok(1); })->finally(sub { $condvar->send; }); $condvar->recv; ok($error_cb_called); }; subtest 'Producer Stats Callback' => sub { my $stats_cb_called = 0; my $stats_cb_received_json = 0; my $producer = TestProducer->new( 'statistics.interval.ms' => 1, 'stats_cb' => sub { my $self = shift; my $json = shift; $stats_cb_called++; $stats_cb_received_json = $json =~ m/^{.*}$/; }, ); for (my $i = 0; $i < 10; $i++) { my $condvar = AnyEvent->condvar; $producer->produce( topic => TestProducer::topic, payload => "test" )->then(sub { ok(1); })->catch(sub { ok(0); })->finally(sub { $condvar->send; }); $condvar->recv; } ok($stats_cb_called, 'Stats callback called'); ok($stats_cb_called > 1, 'Stats callback called more than once'); ok($stats_cb_received_json, 'Stats callback received_json'); }; subtest 'Close producer' => sub { my $producer = TestProducer->new(); eval { $producer->close(); ok(1); 1; } or do { ok(0) or diag($@); }; }; subtest 'Produce' => sub { my $producer = TestProducer->new(); my $condvar = AnyEvent->condvar; $producer->produce( topic => TestProducer::topic, key => "test_key", payload => "test_payload", )->then(sub { my $dr = shift; $condvar->send; isa_ok($dr, 'HASH'); is($dr->{topic}, TestProducer::topic); like($dr->{partition}, qr/\d+/); like($dr->{timestamp}, qr/\d+/); like($dr->{offset}, qr/\d+/); is($dr->{payload}, "test_payload"); is($dr->{key}, "test_key"); })->catch(sub { my $dr = shift; $condvar->send; ok(0) or diag($dr->{error}); }); $condvar->recv; }; subtest 'Produce 100 items' => sub { my $producer = TestProducer->new(); my $condvar = AnyEvent->condvar; my $count = 0; my @promises = (); for (my $i = 0; $i < 100; $i++) { push @promises => $producer->produce( topic => TestProducer::topic, payload => "test_payload", )->then(sub { my $dr = shift; like($dr->{offset}, qr/\d+/); $count++; })->catch(sub { my $dr = shift; ok(0) or diag($dr->{error}); }); } collect(@promises)->then(sub { $condvar->send; })->catch(sub { $condvar->send; }); $condvar->recv; is($count, 100); }; subtest 'Produce NULL payload (tombstone)' => sub { my $producer = TestProducer->new(); my $condvar = AnyEvent->condvar; $producer->produce( topic => TestProducer::topic, key => "test_key", )->then(sub { my $dr = shift; $condvar->send; like($dr->{offset}, qr/\d+/); is($dr->{payload}, undef); is($dr->{key}, "test_key"); })->catch(sub { my $dr = shift; $condvar->send; ok(0) or diag($dr->{error}); }); $condvar->recv; }; subtest 'Produce w/o args' => sub { my $producer = TestProducer->new(); eval { $producer->produce(); ok(0, "Produced w/o necessary arguments"); 1; } or do { ok(1); }; }; subtest 'Produce wrong args' => sub { my $producer = TestProducer->new(); my $condvar = AnyEvent->condvar; $producer->produce( topic => TestProducer::topic, payload => "test_payload", partition => -100, )->then(sub { my $dr = shift; $condvar->send; ok(0, "Produced into unknown partition"); })->catch(sub { my $dr = shift; $condvar->send; is($dr->{error_code}, -190); like($dr->{error}, qr/Unknown partition/); }); $condvar->recv; }; done_testing(); Net-Kafka-1.06/t/PaxHeader/constants.t000644 777777 777777 00000000277 13554325634 023142 xustar00amironovCORPAD\Domain Users000000 000000 17 gid=697783653 18 uid=2117034315 30 mtime=1571924892.691935359 29 ctime=1572341356.23047173 30 atime=1572341356.359036502 23 SCHILY.dev=16777220 26 SCHILY.ino=12902123336 18 SCHILY.nlink=3 Net-Kafka-1.06/t/constants.t000644 €~/aK€)—Ue00000000551 13554325634 021563 0ustar00amironovCORPAD\Domain Users000000 000000 use strict; use warnings; use Test::More tests => 3; use Net::Kafka qw(RD_KAFKA_RESP_ERR_NO_ERROR RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT); is RD_KAFKA_RESP_ERR_NO_ERROR, 0, "NO ERROR code is 0"; is RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT, 7, "REQUEST TIMED OUT code is 7"; is Net::Kafka::RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED(), -170, "constants accessible w/o import"; Net-Kafka-1.06/t/PaxHeader/00-load.t000644 777777 777777 00000000300 13554325634 022245 xustar00amironovCORPAD\Domain Users000000 000000 17 gid=697783653 18 uid=2117034315 30 mtime=1571924892.692616523 30 ctime=1572341356.088018537 30 atime=1572341356.384126618 23 SCHILY.dev=16777220 26 SCHILY.ino=12902123337 18 SCHILY.nlink=3 Net-Kafka-1.06/t/00-load.t000644 €~/aK€)—Ue00000000300 13554325634 020673 0ustar00amironovCORPAD\Domain Users000000 000000 #!perl -T use strict; use warnings; use Test::More tests => 1; BEGIN { use_ok( 'Net::Kafka' ) || print "Bail out!\n"; } diag( "Testing Net::Kafka $Net::Kafka::VERSION, Perl $], $^X" ); Net-Kafka-1.06/t/PaxHeader/lib000755 777777 777777 00000000300 13556003154 021410 xustar00amironovCORPAD\Domain Users000000 000000 17 gid=697783653 18 uid=2117034315 30 mtime=1572341356.225881886 30 ctime=1572341356.225881886 30 atime=1572341356.893050345 23 SCHILY.dev=16777220 26 SCHILY.ino=12902637734 18 SCHILY.nlink=4 Net-Kafka-1.06/t/lib/000755 €~/aK€)—Ue00000000000 13556003154 020117 5ustar00amironovCORPAD\Domain Users000000 000000 Net-Kafka-1.06/t/PaxHeader/02-topic-list.t000644 777777 777777 00000000277 13554325634 023434 xustar00amironovCORPAD\Domain Users000000 000000 17 gid=697783653 18 uid=2117034315 29 mtime=1571924892.69492553 30 ctime=1572341356.059511431 30 atime=1572341356.359067293 23 SCHILY.dev=16777220 26 SCHILY.ino=12902123341 18 SCHILY.nlink=3 Net-Kafka-1.06/t/02-topic-list.t000644 €~/aK€)—Ue00000003046 13554325634 022057 0ustar00amironovCORPAD\Domain Users000000 000000 #!perl -T use Test::More tests => 14; BEGIN { use_ok( 'Net::Kafka' ) || print "Bail out!\n"; } # Create a list and add some data. my $tp_list = Net::Kafka::TopicPartitionList->new; # Add the same one multiple times. $tp_list->add("my-topic1", 0); $tp_list->add("my-topic1", 0); $tp_list->add("my-topic1", 0); $tp_list->add("my-topic1", 0); $tp_list->set_offset("my-topic1", 0, -4444); my $o = $tp_list->offset("my-topic1", 0); ok($o == -4444, 'offset getter ok'); $tp_list->set_offset("my-topic1", 0, 4444); $tp_list->add("my-topic1", 1); $tp_list->set_offset("my-topic1", 1, 5555); $tp_list->add("my-topic1", 2); $tp_list->set_offset("my-topic1", 2, 6666); $tp_list->add("my-topic1", 3); $o = $tp_list->offset("my-topic1-not-there", 9999); ok(!$o, 'offset getter of non-existant topic ok'); ok($tp_list->size() == 4, 'size of list is ok'); # Test deletion works. $tp_list->del("my-topic1", 3); ok($tp_list->size() == 3, 'size of list minus the deleted is ok'); my ($topic, $partition, $offset); ($topic, $partition, $offset) = $tp_list->get(0); ok($topic eq "my-topic1", 'topic name is ok'); ok($partition == 0, 'partition 0 is ok'); ok($offset == 4444, 'offset for part 0 is ok'); ($topic, $partition, $offset) = $tp_list->get(1); ok($topic eq "my-topic1", 'topic name is ok'); ok($partition == 1, 'partition 1 is ok'); ok($offset == 5555, 'offset for part 1 is ok'); ($topic, $partition, $offset) = $tp_list->get(2); ok($topic eq "my-topic1", 'topic name is ok'); ok($partition == 2, 'partition 2 is ok'); ok($offset == 6666, 'offset for part 2 is ok'); Net-Kafka-1.06/t/PaxHeader/03-consumer.t000644 777777 777777 00000000277 13554325634 023201 xustar00amironovCORPAD\Domain Users000000 000000 17 gid=697783653 18 uid=2117034315 30 mtime=1571924892.695831999 29 ctime=1572341356.22731939 30 atime=1572341356.359081401 23 SCHILY.dev=16777220 26 SCHILY.ino=12902123342 18 SCHILY.nlink=3 Net-Kafka-1.06/t/03-consumer.t000644 €~/aK€)—Ue00000030173 13554325634 021625 0ustar00amironovCORPAD\Domain Users000000 000000 #!perl use strict; use warnings; use File::Basename; use lib File::Basename::dirname(__FILE__).'/lib'; use Test::More; use TestConsumer; use TestProducer; use AnyEvent; use AnyEvent::XSPromises qw/collect/; use Data::Dumper qw/Dumper/; use Net::Kafka qw/ RD_KAFKA_RESP_ERR__PARTITION_EOF RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS RD_KAFKA_RESP_ERR_NO_ERROR RD_KAFKA_OFFSET_END RD_KAFKA_OFFSET_BEGINNING /; use constant BATCH_SIZE => 10; use constant TIMEOUT => 30; # 30 seconds plan skip_all => "Missing Kafka test environment" unless TestConsumer->is_ok; my $producer = TestProducer->new(); my $test_id = TestConsumer::group_prefix() . time(); diag($test_id); # Produce BATCH_SIZE messages into test topic and return delivery report map # per partition like: # # { # 0 => { # reports => { # 77544 => { ... }, # 77545 => { ... }, # 77546 => { ... }, # }, # min_offset => 77544, # max_offset => 77546, # }, # 1 => { # ... # } # } # # sub produce { my %args = @_; my $condvar = AnyEvent->condvar; my $result = {}; my @promises = (); for (my $i = 0; $i < BATCH_SIZE; $i++) { push @promises => $producer->produce( topic => TestConsumer::topic, payload => $args{payload}, ( defined $args{partition} ? ( partition => $args{partition} ) : () ), ( defined $args{headers} ? ( headers => $args{headers} ) : () ), )->then(sub { my $dr = shift; $result->{ $dr->{partition} }{reports}{ $dr->{offset} } = $dr; $result->{ $dr->{partition} }{min_offset} = $dr->{offset} if ! defined $result->{ $dr->{partition} }{min_offset} || $dr->{offset} < $result->{ $dr->{partition} }{min_offset}; $result->{ $dr->{partition} }{max_offset} = $dr->{offset} if ! defined $result->{ $dr->{partition} }{max_offset} || $dr->{offset} > $result->{ $dr->{partition} }{max_offset}; }); } collect(@promises)->then(sub { $condvar->send; })->catch(sub { ok(0) or diag(@_); $condvar->send; }); $condvar->recv; return $result; } # Consume a batch of messages produce by "produce" method # and check their sanity sub consume { my ($consumer, $group_id, $messages, $test) = @_; my $start_time = time(); my $consumed = 0; my @received; while(1) { if (time() - $start_time > TIMEOUT) { ok(0, "timeout reading messages from topic: $test"); last; } my $message = $consumer->poll(100); next unless $message; if ( $message->err ) { is ( $message->err, RD_KAFKA_RESP_ERR__PARTITION_EOF ); } else { next unless $message->payload; next unless $message->payload eq $group_id; is( $message->topic, TestConsumer::topic ); like($message->offset, qr/\d+/); like($message->partition, qr/\d+/); ok(0, "unexpected message consumed") unless exists $messages->{ $message->partition }{ reports }{ $message->offset }; $consumed++; push @received, $message; } last if $consumed == BATCH_SIZE; } return @received; } subtest 'Position' => sub { my $group_id = sprintf("%s__position", $test_id); my $consumer = TestConsumer->new( 'group.id' => $group_id ); my $tp_list = Net::Kafka::TopicPartitionList->new(); $tp_list->add(TestConsumer::topic, 0); $tp_list->set_offset(TestConsumer::topic, 0, RD_KAFKA_OFFSET_BEGINNING); $consumer->assign($tp_list); my $start_time = time(); while (1) { if (time() - $start_time > TIMEOUT) { ok(0, "timeout reading messages before finding the position"); last; } my $message = $consumer->poll(); next unless $message; next if $message->err; my $offset = $message->offset; my $position_list = Net::Kafka::TopicPartitionList->new(); $position_list->add(TestConsumer::topic, 0); $consumer->position($position_list); my $position = $position_list->offset(TestConsumer::topic, 0); like($position, qr/\d+/); is($position, $offset + 1); last; } }; subtest 'Query Watermark Offsets' => sub { my $consumer = TestConsumer->new(); my @offsets = $consumer->query_watermark_offsets( TestConsumer::topic, 0 ); is(scalar(@offsets), 2); like($offsets[0], qr/\d+/); like($offsets[1], qr/\d+/); ok($offsets[0] <= $offsets[1]); }; subtest 'Commit w/o local offset stored' => sub { my $consumer = TestConsumer->new( 'group.id' => $test_id ); eval { $consumer->commit(); ok(1); 1; } or do { ok(0) or diag($@); }; }; subtest 'Partitions For' => sub { my $consumer = TestConsumer->new(); my $partitions = $consumer->partitions_for( TestConsumer::topic ); isa_ok($partitions, 'ARRAY'); is (scalar(@$partitions), TestConsumer::topic_partitions, "fetch partitions info"); }; subtest 'Partitions For (missing topic)' => sub { my $consumer = TestConsumer->new(); eval { my $partitions = $consumer->partitions_for( 'non-existing-topic-name' ); ok(0); 1; } or do { ok(1); }; }; subtest 'Consumer Error Callback' => sub { my $group_id = sprintf("%s__error", $test_id); my $error_cb_called = 0; my $consumer = TestConsumer->new( 'group.id' => $group_id, 'bootstrap.servers' => 'localhost:1234', 'error_cb' => sub { my $self = shift; $error_cb_called = 1; }, ); my $start_time = time(); while ($error_cb_called != 1) { last if time() - $start_time > TIMEOUT; $consumer->poll(); } ok($error_cb_called, 'Error callback called'); $consumer->close(); }; subtest 'Consumer Stats Callback' => sub { my $group_id = sprintf("%s__error", $test_id); my $stats_cb_called = 0; my $stats_cb_received_json = 0; my $consumer = TestConsumer->new( 'group.id' => $group_id, 'statistics.interval.ms' => 1, 'stats_cb' => sub { my $self = shift; my $json = shift; $stats_cb_called = 1; $stats_cb_received_json = $json =~ m/^{.*}$/; }, ); my $start_time = time(); while ($stats_cb_called != 1) { last if time() - $start_time > TIMEOUT; $consumer->poll(); } ok($stats_cb_called, 'Stats callback called'); ok($stats_cb_received_json, 'Stats callback received_json'); $consumer->close(); }; subtest 'Distributed Consumer' => sub { my $group_id = sprintf("%s__distributed", $test_id); my $messages = produce( payload => $group_id ); my $consumer = TestConsumer->new( 'group.id' => $group_id, 'offset_commit_cb' => sub { my ( $self, $err, $tp_list ) = @_; is( $err, RD_KAFKA_RESP_ERR_NO_ERROR, "commit callback unexpected err $err" ); }, # When rebalance is finished make sure to seek offsets to the ones # that are produced by "produce" call above 'rebalance_cb' => sub { my ( $self, $err, $tp_list ) = @_; if ( $err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS ) { is( $tp_list->size, TestConsumer::topic_partitions ); foreach my $partition ( keys %$messages ) { like($tp_list->offset( TestConsumer::topic, $partition ), qr/\d+/); $tp_list->set_offset( TestConsumer::topic, $partition, $messages->{$partition}{min_offset} ); } $self->assign( $tp_list ); } elsif ( $err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS ) { $self->assign(); } else { ok(0, "rebalance cb unexpected err $err"); } }, ); # Sub $consumer->subscribe([ TestConsumer::topic ]); # Check our current subscription my $subscription = $consumer->subscription(); isa_ok( $subscription, 'ARRAY' ); is( scalar(@$subscription), 1, "subscription info size" ); is( $subscription->[0], TestConsumer::topic ); # Consume produced messages consume($consumer, $group_id, $messages, "consuming from assigned partitions"); # Unsub $consumer->unsubscribe(); $consumer->close(); }; subtest 'Simple Consumer' => sub { my $group_id = sprintf("%s__simple", $test_id); # Produce message into partition "0" my $messages = produce( payload => $group_id, partition => 0 ); my $consumer = TestConsumer->new( 'enable.auto.commit' => 'false', # Disable autocommit 'group.id' => $group_id, ); # Generate custom partition assignment and set offset to a min offset # produced by "produce" call above, we are only listening to partition "0" my $tp_list = Net::Kafka::TopicPartitionList->new(); $tp_list->add( TestConsumer::topic, 0 ); $tp_list->set_offset( TestConsumer::topic, 0, $messages->{ 0 }{ min_offset } ); is( $tp_list->size(), 1 ); isnt( $messages->{ 0 }{ min_offset }, $messages->{ 0 }{ max_offset } ); $consumer->assign($tp_list); # Consume messages consume($consumer, $group_id, $messages, "consuming from partition 0 for the first time"); # Commit consumed messages $consumer->commit(); # Check what is actually committed $consumer->committed($tp_list); is( $tp_list->offset(TestConsumer::topic, 0), $messages->{0}{max_offset} + 1 ); # Seek to the first offset $consumer->seek( TestConsumer::topic, 0, $messages->{0}{min_offset} ); # Consume messages once again consume($consumer, $group_id, $messages, "consuming from partition 0 after manual seek"); # Close consumer cleanly $consumer->close(); }; subtest 'Consume headers' => sub { my $group_id = sprintf("%s__simple", $test_id); my $headers = Net::Kafka::Headers->new(); $headers->add('head', 'tail'); # Produce message into partition "0" my $messages = produce( payload => $group_id, partition => 0, headers => $headers ); my $consumer = TestConsumer->new( 'enable.auto.commit' => 'false', # Disable autocommit 'group.id' => $group_id, ); # Generate custom partition assignment and set offset to a min offset # produced by "produce" call above, we are only listening to partition "0" my $tp_list = Net::Kafka::TopicPartitionList->new(); $tp_list->add( TestConsumer::topic, 0 ); $tp_list->set_offset( TestConsumer::topic, 0, $messages->{ 0 }{ min_offset } ); is( $tp_list->size(), 1 ); isnt( $messages->{ 0 }{ min_offset }, $messages->{ 0 }{ max_offset } ); $consumer->assign($tp_list); # Consume messages my @messages = consume($consumer, $group_id, $messages, "consuming from partition 0 for the first time"); is(scalar @messages, BATCH_SIZE); my $received_message = $messages[0]; # headers can be retrieved multiple times { my $received_headers = $received_message->headers(); isa_ok($received_headers, 'Net::Kafka::Headers'); is($received_headers->get_last('head'), 'tail'); } { my $received_headers = $received_message->headers(); isa_ok($received_headers, 'Net::Kafka::Headers'); is($received_headers->get_last('head'), 'tail'); } # headers can be retrieved multiple times (they are recreated transparently after detach) { my $received_headers = $received_message->detach_headers(); isa_ok($received_headers, 'Net::Kafka::Headers'); is($received_headers->get_last('head'), 'tail'); } { my $received_headers = $received_message->detach_headers(); isa_ok($received_headers, 'Net::Kafka::Headers'); is($received_headers->get_last('head'), 'tail'); } }; subtest 'Close consumer' => sub { my $consumer = TestConsumer->new(); eval { $consumer->close(); ok(1); 1; } or do { ok(0) or diag($@); }; }; done_testing(); Net-Kafka-1.06/t/lib/PaxHeader/TestProducer.pm000644 777777 777777 00000000300 13554325634 024453 xustar00amironovCORPAD\Domain Users000000 000000 17 gid=697783653 18 uid=2117034315 30 mtime=1571924892.693620753 30 ctime=1572341356.225491151 30 atime=1572341356.433553668 23 SCHILY.dev=16777220 26 SCHILY.ino=12902123339 18 SCHILY.nlink=3 Net-Kafka-1.06/t/lib/TestProducer.pm000644 €~/aK€)—Ue00000001026 13554325634 023107 0ustar00amironovCORPAD\Domain Users000000 000000 package TestProducer; use strict; use warnings; use Net::Kafka::Producer; sub is_ok { return bootstrap_servers() && topic() && topic_partitions(); } sub bootstrap_servers { return $ENV{KAFKA_BOOTSTRAP_SERVERS}; } sub topic { return $ENV{KAFKA_TEST_TOPIC}; } sub topic_partitions { return $ENV{KAFKA_TEST_TOPIC_PARTITIONS}; } sub new { my ($class, %args) = @_; return Net::Kafka::Producer->new( 'bootstrap.servers' => $args{'bootstrap.servers'} // bootstrap_servers(), %args ); } 1; Net-Kafka-1.06/t/lib/PaxHeader/TestConsumer.pm000644 777777 777777 00000000277 13554325634 024500 xustar00amironovCORPAD\Domain Users000000 000000 17 gid=697783653 18 uid=2117034315 30 mtime=1571924892.694188886 30 ctime=1572341356.226080966 29 atime=1572341357.22693411 23 SCHILY.dev=16777220 26 SCHILY.ino=12902123340 18 SCHILY.nlink=3 Net-Kafka-1.06/t/lib/TestConsumer.pm000644 €~/aK€)—Ue00000001154 13554325634 023121 0ustar00amironovCORPAD\Domain Users000000 000000 package TestConsumer; use strict; use warnings; use Net::Kafka::Consumer; sub is_ok { return bootstrap_servers() && topic() && topic_partitions() && group_prefix(); } sub bootstrap_servers { return $ENV{KAFKA_BOOTSTRAP_SERVERS}; } sub topic { return $ENV{KAFKA_TEST_TOPIC}; } sub topic_partitions { return $ENV{KAFKA_TEST_TOPIC_PARTITIONS}; } sub group_prefix { return $ENV{KAFKA_CONSUMER_GROUP_PREFIX}; } sub new { my ($class, %args) = @_; return Net::Kafka::Consumer->new( 'bootstrap.servers' => $args{'bootstrap.servers'} // bootstrap_servers(), %args ); } 1;