==> webpy/experimental/background.py <==
"""Helper functions to run long-running tasks."""
import threading

from web import utils
from web import webapi as web
# These two names are used below but were not imported in this dump;
# in the web.py 0.3 layout they live in web.http and web.webapi respectively.
from web.http import changequery
from web.webapi import seeother

def background(func):
    """A function decorator to run a long-running function as a background thread."""
    def internal(*a, **kw):
        web.data() # cache it

        tmpctx = web._context[threading.currentThread()]
        web._context[threading.currentThread()] = utils.storage(web.ctx.copy())

        def newfunc():
            web._context[threading.currentThread()] = tmpctx
            func(*a, **kw)
            myctx = web._context[threading.currentThread()]
            for k in myctx.keys():
                if k not in ['status', 'headers', 'output']:
                    try: del myctx[k]
                    except KeyError: pass

        t = threading.Thread(target=newfunc)
        background.threaddb[id(t)] = t
        t.start()

        web.ctx.headers = []
        return seeother(changequery(_t=id(t)))
    return internal
background.threaddb = {}
def backgrounder(func):
    def internal(*a, **kw):
        i = web.input(_method='get')
        if '_t' in i:
            try:
                t = background.threaddb[int(i._t)]
            except KeyError:
                return web.notfound()
            web._context[threading.currentThread()] = web._context[t]
            return
        else:
            return func(*a, **kw)
    return internal
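
# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# The names `count_sheep` and `index` are hypothetical, and this assumes the
# old print-style web.py runner that this experimental module was written for.

@background
def count_sheep():
    import time
    for i in range(10):
        time.sleep(1)
        print i  # captured into the background thread's context by the old runner

class index:
    @backgrounder
    def GET(self):
        # The first request starts the thread and redirects to ?_t=<thread id>;
        # later requests carrying _t are served from that thread's saved context.
        return count_sheep()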
==> webpy/experimental/migration.py <==
"""Migration script to run web.py 0.23 programs using 0.3.

Import this module at the beginning of your program.
"""
import web
import sys

def setup_database():
    if web.config.get('db_parameters'):
        db = web.database(**web.config.db_parameters)
        web.insert = db.insert
        web.select = db.select
        web.update = db.update
        web.delete = db.delete
        web.query = db.query

        def transact():
            t = db.transaction()
            web.ctx.setdefault('transaction_stack', []).append(t)

        def rollback():
            stack = web.ctx.get('transaction_stack')
            t = stack and stack.pop()
            t and t.rollback()

        def commit():
            stack = web.ctx.get('transaction_stack')
            t = stack and stack.pop()
            t and t.commit()

        web.transact = transact
        web.rollback = rollback
        web.commit = commit

web.loadhooks = web.webapi.loadhooks = {}
web._loadhooks = web.webapi._loadhooks = {}
web.unloadhooks = web.webapi.unloadhooks = {}

def load():
    setup_database()
web.load = load

def run(urls, fvars, *middleware):
    setup_database()

    def stdout_processor(handler):
        handler()
        return web.ctx.get('output', '')

    def hook_processor(handler):
        for h in web.loadhooks.values() + web._loadhooks.values(): h()
        output = handler()
        for h in web.unloadhooks.values(): h()
        return output

    app = web.application(urls, fvars)
    app.add_processor(stdout_processor)
    app.add_processor(hook_processor)
    app.run(*middleware)

class _outputter:
    """Wraps `sys.stdout` so that print statements go into the response."""
    def __init__(self, file): self.file = file
    def write(self, string_):
        if hasattr(web.ctx, 'output'):
            return output(string_)
        else:
            self.file.write(string_)
    def __getattr__(self, attr): return getattr(self.file, attr)
    def __getitem__(self, item): return self.file[item]

def output(string_):
    """Appends `string_` to the response."""
    string_ = web.safestr(string_)
    if web.ctx.get('flush'):
        web.ctx._write(string_)
    else:
        web.ctx.output += str(string_)

def _capturedstdout():
    sysstd = sys.stdout
    while hasattr(sysstd, 'file'):
        if isinstance(sys.stdout, _outputter): return True
        sysstd = sysstd.file
    if isinstance(sys.stdout, _outputter): return True
    return False

if not _capturedstdout():
    sys.stdout = _outputter(sys.stdout)

web.run = run

class Stowage(web.storage):
    def __str__(self):
        return self._str

web.template.Stowage = web.template.stowage = Stowage
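
# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# An old web.py 0.23-style program keeps its print-style handlers and its
# web.run() call, and simply imports this module first; the names below
# (`urls`, `index`) are hypothetical.
#
#     import migration   # this module; patches web.run, web.load, etc.
#     import web
#
#     urls = ('/', 'index')
#
#     class index:
#         def GET(self):
#             print "hello, world"   # captured by _outputter into web.ctx.output
#
#     if __name__ == "__main__":
#         web.run(urls, globals())   # the run() defined above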
==> webpy/experimental/untwisted.py <==
import random

from twisted.internet import reactor, defer
from twisted.web import http
import simplejson

import web

class Request(http.Request):
    def process(self):
        self.content.seek(0, 0)
        env = {
          'REMOTE_ADDR': self.client.host,
          'REQUEST_METHOD': self.method,
          'PATH_INFO': self.path,
          'CONTENT_LENGTH': web.intget(self.getHeader('content-length'), 0),
          'wsgi.input': self.content
        }
        if '?' in self.uri:
            env['QUERY_STRING'] = self.uri.split('?', 1)[1]

        for k, v in self.received_headers.iteritems():
            env['HTTP_' + k.upper()] = v

        if self.path.startswith('/static/'):
            f = web.lstrips(self.path, '/static/')
            assert '/' not in f
            #@@@ big security hole
            self.write(file('static/' + f).read())
            return self.finish()

        web.webapi._load(env)
        web.ctx.trequest = self
        result = self.actualfunc()
        self.setResponseCode(int(web.ctx.status.split()[0]))
        for (h, v) in web.ctx.headers:
            self.setHeader(h, v)
        self.write(web.ctx.output)
        if not web.ctx.get('persist'):
            self.finish()

class Server(http.HTTPFactory):
    def __init__(self, func):
        self.func = func

    def buildProtocol(self, addr):
        """Generate a channel attached to this site.
        """
        channel = http.HTTPFactory.buildProtocol(self, addr)
        class MyRequest(Request):
            actualfunc = staticmethod(self.func)
        channel.requestFactory = MyRequest
        channel.site = self
        return channel

def runtwisted(func):
    reactor.listenTCP(8086, Server(func))
    reactor.run()

def newrun(inp, fvars):
    print "Running on http://0.0.0.0:8086/"
    runtwisted(web.webpyfunc(inp, fvars, False))
def iframe(url):
    # NOTE: the original <iframe> markup was stripped from this dump; the tag
    # below is a minimal reconstruction that embeds `url`.
    return """<iframe src="%s" width="0" height="0" frameborder="0"></iframe>
    """ % url #("http://%s.ajaxpush.lh.theinfo.org:8086%s" % (random.random(), url))
class Feed:
    def __init__(self):
        self.sessions = []

    def subscribe(self):
        request = web.ctx.trequest
        self.sessions.append(request)
        request.connectionLost = lambda reason: self.sessions.remove(request)
        web.ctx.persist = True

    def publish(self, text):
        for x in self.sessions:
            x.write(text)

class JSFeed(Feed):
    def __init__(self, callback="callback"):
        Feed.__init__(self)
        self.callback = callback
    def publish(self, obj):
        web.debug("publishing")
        # NOTE: the <script> markup was stripped from this dump; this is a
        # minimal reconstruction that invokes the JS callback with the JSON.
        Feed.publish(self,
          '<script>%s(%s);</script>' % (self.callback, simplejson.dumps(obj) +
          " " * 2048))
if __name__ == "__main__":
mfeed = JSFeed()
urls = (
'/', 'view',
'/js', 'js',
'/send', 'send'
)
class view:
def GET(self):
print """
Today's News
Contribute
"""
    class js:
        def GET(self):
            mfeed.subscribe()

    class send:
        def POST(self):
            # NOTE: the markup around '%s' was stripped from this dump.
            mfeed.publish('%s' % web.input().text + (" " * 2048))
            web.seeother('/')
    newrun(urls, globals())

==> webpy/experimental/pwt.py <==
import web
import simplejson, sudo

urls = (
  '/sudo', 'sudoku',
  '/length', 'length',
)
class pwt(object):
    _inFunc = False
    updated = {}

    # NOTE: the HTML page skeleton (presumably including the JavaScript that
    # posts the form and defines receive()) was stripped from this dump; only
    # the %s slot for the form remains.
    page = """
    %s
    """

    def GET(self):
        web.header('Content-Type', 'text/html')
        print self.page % self.form()

    def POST(self):
        i = web.input()
        if '_' in i: del i['_']
        #for k, v in i.iteritems(): setattr(self, k, v)
        self._inFunc = True
        self.work(**i)
        self._inFunc = False

        web.header('Content-Type', 'text/javascript')
        print 'receive('+simplejson.dumps(self.updated)+');'

    def __setattr__(self, k, v):
        if self._inFunc and k != '_inFunc':
            self.updated[k] = v
        object.__setattr__(self, k, v)
class sudoku(pwt):
    def form(self):
        import sudo
        out = ''
        n = 0
        for i in range(9):
            for j in range(9):
                # NOTE: the <input> markup was stripped from this dump; this is
                # a minimal reconstruction naming each cell after its square.
                out += '<input name="%s" size="1"> ' % (sudo.squares[n])
                n += 1
            out += '<br>\n'  # row break (markup reconstructed)
        return out

    def work(self, **kw):
        values = dict((s, sudo.digits) for s in sudo.squares)
        for k, v in kw.iteritems():
            if v:
                sudo.assign(values, k, v)
        for k, v in values.iteritems():
            if len(v) == 1:
                setattr(self, k, v)
        return values
class length(pwt):
    def form(self):
        # NOTE: the <input> markup was stripped from this dump; a minimal
        # reconstruction of a single text field named "n".
        return '<input name="n">'

    def work(self):
        self.output = ('a' * web.intget(self.n, 0) or ' ')

if __name__ == "__main__":
    web.run(urls, globals(), web.reloader)

==> webpy/.gitignore <==
*.pyc
.DS_Store
build/
dist/
docs/_build/
*.egg-info
==> webpy/README.md <==
web.py is a web framework for Python that is as simple as it is powerful.
Visit http://webpy.org/ for more information.
==> webpy/ChangeLog.txt <==
# web.py changelog
## 2016-07-08 0.38
* Fixed failing tests in test/session.py when postgres is not installed. (tx Michael Diamond)
* Fixed an error with Python 2.3 (tx Michael Diamond)
* web.database now accepts a URL, $DATABASE_URL (fixes #171) (tx Aaron Swartz, we miss you)
* Support 'port' keyword for postgres databases used with pgdb (tx Sandesh Singh)
* Fixes to FirebirdDB database (tx Ben Hanna)
* Added a gaerun method to start application for google app engine (tx Matt Habel)
* Better error message from `db.multiple_insert` when not all rows have the same keys (tx Ben Hoyt)
* Allow custom messages for most errors (tx Shaun Sharples)
* IPv6 support (tx Matthew of Boswell and zamabe)
* Fixed sending email using Amazon SES (tx asldevi)
* Fixed handling of long numbers in sqlify. closes #213. (tx cjrolo)
* Escape HTML characters when emitting API docs. (tx Jeff Zellman)
* Fixed an inconsistency in form.Dropdown when numbers are used for args and value. (tx Noprianto)
* Fixed a potential remote execution risk in `reparam` (tx Adrián Brav)
* The where clause in db queries can be a dict now
* Added `first` method to iterbetter
* Fix to unexpected session when used with MySQL (tx suhashpatil)
* Change dburl2dict to use urlparse and to support the simple case of just a database name. (tx Jeff Zellman)
* Support '204 No Content' status code (tx Matteo Landi)
* Support `451 Unavailable For Legal Reasons` status code (tx Yannik Robin Kettenbach)
* Updates to documentation (tx goodrone, asldevi)
## 2012-06-26 0.37
* Fixed datestr issue on Windows -- #155
* Fixed Python 2.4 compatibility issues (tx fredludlow)
* Fixed error in utils.safewrite (tx shuge) -- #95
* Allow use of web.data() with app.request() -- #105
* Fixed an issue with session initialization (tx beardedprojamz) -- #109
* Allow custom message on 400 Bad Request (tx patryk) -- #121
* Made djangoerror work on GAE. -- #80
* Handle malformatted data in the urls. -- #117
* Made it easier to stop the dev server -- #100, #122
* Added support for customizing cookie_path in session (tx larsga) -- #89
* Added exception for "415 Unsupported Media" (tx JirkaChadima) -- #145
* Added GroupedDropdown to support `<optgroup>` tag (tx jzellman) -- #152
* Fixed failure in embedded interpreter - #87
* Optimized web.cookies (tx benhoyt) - #148
## 2011-07-04 0.36
* Upgraded to CherryPy WSGIServer 3.2.0. -- #66
* Various Jython compatibility fixes (tx Ben Noordhuis)
* allow strips to accept lists -- #69
* Improvements to setcookie (tx lovelylain) -- #65
* Added __contains__ method to Session. (tx lovelylain) #65
* Added secure option to session. -- #38
* Fixed db.delete error with `using` clause (tx berndtj) -- #28
* Fixed the case of no where-clauses in db.where
* Fixed threadlocal error in python2.3 -- #77
* Fixed TemplateResult inconsistant behavior -- #78
* Fixed query execution issues with MSSQL -- #71
## 2011-05-15 0.35
* Better ThreadedDict implementation using threadlocal (tx Ben Hoyt)
* Make Form a new-style class -- #53
* Optimized SQLQuery.join and generation of multiple_insert query -- #58
* New: support for Amazon's Simple Email Service
* Added httponly keyword to setcookie (tx Justin Davis)
* Added httponly only option to sessions and enabled it by default (tx Justin Davis)
* made htmlquote and htmlunquote work with unicode
* Added doseq support for web.url
* New flag web.config.debug_sql to control printing of db queries (tx Nimrod S. Kerrett)
* Fixed inserting default values into MySQL -- #49
* Fixed rendering of Dropdown with multiple values (tx krowbar) -- #43
* Fixed multiple set-cookie header issue with session -- #45
* Fixed error in safeunicode when used with appengine datastore objects
* Fixed unicode error in generating debugerror -- #26
* Fixed GAE compilation issue -- #24
* Fixed unicode encoding issue in templates -- #17
* Fixed a bug in form.RadioButton when called with tuple options (tx fhsm) -- #13
* Fixed error in creating PostgresDB with pgdb driver (tx cninucci) -- #23
* Support auto conversion of timestamp/date datatypes in sqlite to datetime.date objects -- #22
* Fixed escaping issue on GAE -- #10
* fixed form.validates for checkbox (tx Justin Davis).
* fixed duplicate content-type in web.sendmail -- #20
* Fix: create session dirs if required (tx Martin Marcher)
* Fixed safestr to make use of encoding argument (tx s7v7nislands)
* Don't allow /static/../foo urls in dev webserver (tx Arnar Lundesgaard)
* Disabled debug mode in flup server (tx irrelative) -- #35
* And a lot of unicode fixes
## 2010-03-20 0.34
* fix: boolean test works even for sqlite results (tx Emyr Thomas for the idea)
* fix issue with loop.xx variables in templetor (Bug#476708)
* hide unwanted tracebacks in debugerror
* display correct template line numbers in debugerror
* new utilities: counter, safeiter, safewrite, requeue, restack (by Aaron Swartz)
* various form.py fixes and improvements
* automatically escape % characters in the db query (Bug#516516)
* fix non-deterministic template order (Bug#490209)
* attachment support for web.sendmail (tx gregglind)
* template.py optimizations and extension support
## 2009-10-28 0.33
* form.Button takes optional argument `html`
* remove obsolete write function in http.py (tx Justin) (Bug#315337)
* refactor httpserver.runsimple code
* improve form.py for customizability
* new: add background updating to memoize
* fix: use sendmail from web.config.sendmail_path (tx Daniel Schwartz)
* fix: make web.profiler work on Windows (tx asmo) (Bug#325139)
* fix changequery to make it work correctly even when the input has multi-valued fields (Bug#118229)
* fix: make sure sequence exists before querying for currval(seqname) when executing postgres insert query (Bug#268705)
* fix: raise web.notfound() instead of return in autodelegate (tx SeC)
* fix: raise NotSupportedError when len or bool is used on sqlite result (Bug#179644)
* fix: make db parameter optional for creating postgres DB to allow taking it from environ. (Bug#153491)
* fix unicode errors in db module
* fix: convert unicode strings to UTF8 before printing SQL queries
* fix unicode error in debugerror
* fix: don't convert file upload data to unicode even when file={} is not passed to web.input
* fix checkbox value/checked confusion (Bug#128233)
* fix: consider empty lines as part of the indented block in templetor
* fix: fix a bug in web.group
## 2009-06-04 0.32
* optional from_address to web.emailerrors
* upgrade wsgiserver to CherryPy/3.1.2
* support for extensions in Jinja2 templates (tx Zhang Huangbin)
* support web.datestr for datetime.date objects also
* support for lists in db queries
* new: uniq and iterview
* fix: set debug=False when application is run with mod_wsgi (tx Patrick Swieskowski) [Bug#370904](https://bugs.launchpad.net/webpy/+bug/370904)
* fix: make web.commify work with decimals [Bug#317204](https://bugs.launchpad.net/webpy/+bug/317204)
* fix: unicode issues with sqlite database [Bug#373219](https://bugs.launchpad.net/webpy/+bug/373219)
* fix: urlquote url when the server is lighttpd [Bug#339858](https://bugs.launchpad.net/webpy/+bug/339858)
* fix: issue with using date.format in templates
* fix: use TOP instead of LIMIT in mssql database [Bug#324049](https://bugs.launchpad.net/webpy/+bug/324049)
* fix: make sessions work well with expirations
* fix: accept both list and tuple as arg values in form.Dropdown [Bug#314970](https://bugs.launchpad.net/webpy/+bug/314970)
* fix: match parenthesis when parsing `for` statement in templates
* fix: fix python 2.3 compatibility
* fix: ignore dot folders when compiling templates (tx Stuart Langridge)
* fix: don't consume KeyboardInterrupt and SystemExit errors
* fix: make application work well with iterators
## 2008-12-10: 0.31
* new: browser module
* new: test utilities
* new: ShelfStore
* fix: web.cookies error when default is None
* fix: paramstyle for OracleDB (tx kromakey)
* fix: performance issue in SQLQuery.join
* fix: use wsgi.url_scheme to find ctx.protocol
## 2008-12-06: 0.3
* new: replace print with return (backward-incompatible )
* new: application framework (backward-incompatible )
* new: modular database system (backward-incompatible )
* new: templetor reimplementation
* new: better unicode support
* new: debug mode (web.config.debug)
* new: better db pooling
* new: sessions
* new: support for GAE
* new: etag support
* new: web.openid module
* new: web.nthstr
* fix: various form.py fixes
* fix: python 2.6 compatibility
* fix: file uploads are not loaded into memory
* fix: SQLLiteral issue (Bug#180027)
* change: web.background is moved to experimental (backward-incompatible )
* improved API doc generation (tx Colin Rothwell)
## 2008-01-19: 0.23
* fix: for web.background gotcha ([133079](http://bugs.launchpad.net/webpy/+bug/133079))
* fix: for postgres unicode bug ([177265](http://bugs.launchpad.net/webpy/+bug/177265))
* fix: web.profile behavior in python 2.5 ([133080](http://bugs.launchpad.net/webpy/+bug/133080))
* fix: only uppercase HTTP methods are allowed. ([176415](http://bugs.launchpad.net/webpy/+bug/176415))
* fix: transaction error in with statement ([125118](http://bugs.launchpad.net/webpy/+bug/125118))
* fix: fix in web.reparam ([162085](http://bugs.launchpad.net/webpy/+bug/162085))
* fix: various unicode issues ([137042](http://bugs.launchpad.net/webpy/+bug/137042), [180510](http://bugs.launchpad.net/webpy/+bug/180510), [180549](http://bugs.launchpad.net/webpy/+bug/180549), [180653](http://bugs.launchpad.net/webpy/+bug/180653))
* new: support for https
* new: support for secure cookies
* new: sendmail
* new: htmlunquote
## 2007-08-23: 0.22
* compatibility with new DBUtils API ([122112](https://bugs.launchpad.net/webpy/+bug/122112))
* fix reloading ([118683](https://bugs.launchpad.net/webpy/+bug/118683))
* fix compatibility between `changequery` and `redirect` ([118234](https://bugs.launchpad.net/webpy/+bug/118234))
* fix relative URI in `web.redirect` ([118236](https://bugs.launchpad.net/webpy/+bug/118236))
* fix `ctx._write` support in built-in HTTP server ([121908](https://bugs.launchpad.net/webpy/+bug/121908))
* fix `numify` strips things after '.'s ([118644](https://bugs.launchpad.net/webpy/+bug/118644))
* fix various unicode issues ([114703](https://bugs.launchpad.net/webpy/+bug/114703), [120644](https://bugs.launchpad.net/webpy/+bug/120644), [124280](https://bugs.launchpad.net/webpy/+bug/124280))
## 2007-05-28: 0.21
* security fix: prevent bad characters in headers
* support for cheetah template reloading
* support for form validation
* new `form.File`
* new `web.url`
* fix rendering issues with hidden and button inputs
* fix 2.3 incompatibility with `numify`
* fix multiple headers with same name
* fix web.redirect issues when homepath is not /
* new CherryPy wsgi server
* new nested transactions
* new sqlliteral
## 2006-05-09: 0.138
* New function: `intget`
* New function: `datestr`
* New function: `validaddr`
* New function: `sqlwhere`
* New function: `background`, `backgrounder`
* New function: `changequery`
* New function: `flush`
* New function: `load`, `unload`
* New variable: `loadhooks`, `unloadhooks`
* Better docs; generating [docs](documentation) from web.py now
* global variable `REAL_SCRIPT_NAME` can now be used to work around lighttpd madness
* fastcgi/scgi servers now can listen on sockets
* `output` now encodes Unicode
* `input` now takes optional `_method` argument
* Potentially-incompatible change: `input` now returns `badrequest` automatically when `requireds` aren't found
* `storify` now takes lists and dictionaries as requests (see docs)
* `redirect` now blanks any existing output
* Quote SQL better when `db_printing` is on
* Fix delay in `nomethod`
* Fix `urlquote` to encode better.
* Fix 2.3 incompatibility with `iters` (tx ??)
* Fix duplicate headers
* Improve `storify` docs
* Fix `IterBetter` to raise IndexError, not KeyError
## 2006-03-27: 0.137
* Add function `dictfindall` (tx Steve Huffman)
* Add support to `autodelegate` for arguments
* Add functions `httpdate` and `parsehttpdate`
* Add function `modified`
* Add support for FastCGI server mode
* Clarify `dictadd` documentation (tx Steve Huffman)
* Changed license to public domain
* Clean up to use `ctx` and `env` instead of `context` and `environ`
* Improved support for PUT, DELETE, etc. (tx list)
* Fix `ctx.fullpath` (tx Jesir Vargas)
* Fix sqlite support (tx Dubhead)
* Fix documentation bug in `lstrips` (tx Gregory Petrosyan)
* Fix support for IPs and ports (1/2 tx Jesir Vargas)
* Fix `ctx.fullpath` (tx Jesir Vargas)
* Fix sqlite support (tx Dubhead)
* Fix documentation bug in `lstrips` (tx Gregory Petrosyan)
* Fix `iters` bug with sets
* Fix some breakage introduced by Vargas's patch
* Fix `sqlors` bug
* Fix various small style things (tx Jesir Vargas)
* Fix bug with `input` ignoring GET input
## 2006-02-22: 0.136 (svn)
* Major code cleanup (tx to Jesir Vargas for the patch).
* 2006-02-15: 0.135
* Really fix that mysql regression (tx Sean Leach).
* 2006-02-15: 0.134
* The `StopIteration` exception is now caught. This can be used by functions that do things like check to see if a user is logged in. If the user isn't, they can output a message with a login box and raise StopIteration, preventing the caller from executing.
* Fix some documentation bugs.
* Fix mysql regression (tx mrstone).
## 2006-02-12: 0.133
* Docstrings! (tx numerous, esp. Jonathan Mark (for the patch) and Guido van Rossum (for the prod))
* Add `set` to web.iters.
* Make the `len` returned by `query` an int (tx ??).
* Backwards-incompatible change: `base` now called `prefixurl`.
* Backwards-incompatible change: `autoassign` now takes `self` and `locals()` as arguments.
## 2006-02-07: 0.132
* New variable `iters` is now a listing of possible list-like types (currently list, tuple, and, if it exists, Set).
* New function `dictreverse` turns `{1:2}` into `{2:1}`.
* `Storage` now a dictionary subclass.
* `tryall` now takes an optional prefix of functions to run.
* `sqlors` has various improvements.
* Fix a bunch of DB API bugs.
* Fix bug with `storify` when it received multiple inputs (tx Ben Woosley).
* Fix bug with returning a generator (tx Zbynek Winkler).
* Fix bug where len returned a long on query results (tx F.S).
## 2006-01-31: 0.131 (not officially released)
* New function `_interpolate` used internally for interpolating strings.
* Redone database API. `select`, `insert`, `update`, and `delete` all made consistent. Database queries can now do more complicated expressions like `$foo.bar` and `${a+b}`. You now have to explicitly pass the dictionary to look up variables in. Pass `vars=locals()` to get the old functionality of looking up variables .
* New functions `sqllist` and `sqlors` generate certain kinds of SQL.
## 2006-01-30: 0.13
* New functions `found`, `seeother`, and `tempredirect` now let you do other kinds of redirects. `redirect` now also takes an optional status parameter. (tx many)
* New functions `expires` and `lastmodified` make it easy to send those headers.
* New function `gone` returns a 410 Gone (tx David Terrell).
* New function `urlquote` applies url encoding to a string.
* New function `iterbetter` wraps an iterator and allows you to do __getitem__s on it.
* Have `query` return an `iterbetter` instead of an iterator.
* Have `debugerror` show tracebacks with the innermost frame first.
* Add `__hash__` function to `threadeddict` (and thus, `ctx`).
* Add `context.host` value for the requested host name.
* Add option `db_printing` that prints database queries and the time they take.
* Add support for database pooling (tx Steve Huffman).
* Add support for passing values to functions called by `handle`. If you do `('foo', 'value')` it will add `'value'` as an argument when it calls `foo`.
* Add support for scgi (tx David Terrell for the patch).
* Add support for web.py functions that are iterators (tx Brendan O'Connor for the patch).
* Use new database cursors on each call instead of reusing one.
* `setcookie` now takes an optional `domain` argument.
* Fix bug in autoassign.
* Fix bug where `debugerror` would break on objects it couldn't display.
* Fix bug where you couldn't do `#include`s inline.
* Fix bug with `reloader` and database calls.
* Fix bug with `reloader` and base templates.
* Fix bug with CGI mode on certain operating systems.
* Fix bug where `debug` would crash if called outside a request.
* Fix bug with `context.ip` giving weird values with proxies.
## 2006-01-29: 0.129
* Add Python 2.2 support.
## 2006-01-28: 0.128
* Fix typo in `web.profile`.
## 2006-01-28: 0.127
* Fix bug in error message if invalid dbn is sent (tx Panos Laganakos).
## 2006-01-27: 0.126
* Fix typos in Content-Type headers (tx Beat Bolli for the prod).
## 2006-01-22: 0.125
* Support Cheetah 2.0.
## 2006-01-22: 0.124
* Fix spacing bug (tx Tommi Raivio for the prod).
## 2006-01-16: 0.123
* Fix bug with CGI usage (tx Eddie Sowden for the prod).
## 2006-01-14: 0.122
* Allow DELETEs from `web.query` (tx Joost Molenaar for the prod).
## 2006-01-08: 0.121
* Allow import of submodules like `pkg.mod.cn` (tx Sridhar Ratna).
* Fix a bug in `update` (tx Sergey Khenkin).
## 2006-01-05: 0.12
* Backwards-incompatible change: `db_parameters` is now a dictionary.
* Backwards-incompatible change: `sumdicts` is now `dictadd`.
* Add support for PyGreSQL, MySQL (tx Hallgrimur H. Gunnarsson).
* Use HTML for non-Cheetah error message.
* New function `htmlquote()`.
* New function `tryall()`.
* `ctx.output` can now be set to a generator. (tx Brendan O'Connor)
## 2006-01-04: 0.117
* Add support for psycopg 1.x. (tx Gregory Price)
## 2006-01-04: 0.116
* Add support for Python 2.3. (tx Evan Jones)
## 2006-01-04: 0.115
* Fix some bugs where database queries weren't reparameterized. Oops!
* Fix a bug where `run()` wasn't getting the right functions.
* Remove a debug statement accidentally left in.
* Allow `storify` to be used on dictionaries. (tx Joseph Trent)
## 2006-01-04: 0.114
* Make `reloader` work on Windows. (tx manatlan)
* Fix some small typos that affected colorization. (tx Gregory Price)
## 2006-01-03: 0.113
* Reorganize `run()` internals so mod_python can be used. (tx Nicholas Matsakis)
## 2006-01-03: 0.112
* Make `reloader` work when `code.py` is called with a full path. (tx David Terrell)
## 2006-01-03: 0.111
* Fixed bug in `strips()`. (tx Michael Josephson)
## 2006-01-03: 0.11
* First public version.
==> webpy/test/doctests.py <==
"""Run all doctests in web.py.
"""
import webtest

def suite():
    modules = [
        "web.application",
        "web.db",
        "web.form",
        "web.http",
        "web.net",
        "web.session",
        "web.template",
        "web.utils",
        # "web.webapi",
        # "web.wsgi",
    ]
    return webtest.doctest_suite(modules)

if __name__ == "__main__":
    webtest.main()
==> webpy/test/wsgi.py <==
import webtest, web
import threading, time

class WSGITest(webtest.TestCase):
    def test_layers_unicode(self):
        urls = (
            '/', 'uni',
        )
        class uni:
            def GET(self):
                return u"\u0C05\u0C06"

        app = web.application(urls, locals())
        thread = threading.Thread(target=app.run)
        thread.start()
        time.sleep(0.5)

        b = web.browser.Browser()
        r = b.open('/').read()
        s = r.decode('utf8')
        self.assertEqual(s, u"\u0C05\u0C06")

        app.stop()
        thread.join()

    def test_layers_bytes(self):
        urls = (
            '/', 'bytes',
        )
        class bytes:
            def GET(self):
                return b'abcdef'

        app = web.application(urls, locals())
        thread = threading.Thread(target=app.run)
        thread.start()
        time.sleep(0.5)

        b = web.browser.Browser()
        r = b.open('/')
        self.assertEqual(r.read(), b'abcdef')

        app.stop()
        thread.join()

if __name__ == '__main__':
    webtest.main()
==> webpy/test/requirements2.txt <==
pysqlite; python_version >= '2.7'
MySQL-python
PyGreSQL
==> webpy/test/webtest.py <==
"""webtest: test utilities.
"""
import sys, os

# adding current directory to path to make sure local modules can be imported
sys.path.insert(0, '.')

from web.test import *

def setup_database(dbname, driver=None, pooling=False):
    if dbname == 'sqlite':
        db = web.database(dbn=dbname, db='webpy.db', pooling=pooling, driver=driver)
    elif dbname == 'postgres':
        user = os.getenv('USER')
        db = web.database(dbn=dbname, db='webpy', user=user, pw='', pooling=pooling, driver=driver)
    else:
        db = web.database(dbn=dbname, db='webpy', user='scott', pw='tiger', pooling=pooling, driver=driver)

    db.printing = '-v' in sys.argv
    return db
==> webpy/test/README.md <==
# web.py unit tests

## Setup

The database tests expect a database named `webpy`, accessible with username `scott` and password `tiger`.
## Running all tests
To run all tests:
$ python test/alltests.py
## Running individual tests
To run all tests in a file:
$ python test/db.py
To run all tests in a class:
$ python test/db.py SqliteTest
To run a single test:
$ python test/db.py SqliteTest.testUnicode
==> webpy/test/__init__.py <==

==> webpy/test/alltests.py <==
import webtest

def suite():
    modules = ["doctests", "db", "application", "session", "template", "wsgi"]
    return webtest.suite(modules)

if __name__ == "__main__":
    webtest.main()
==> webpy/test/browser.py <==
import webtest
import web
urls = (
"/", "index",
"/hello/(.*)", "hello",
"/cookie", "cookie",
"/setcookie", "setcookie",
"/redirect", "redirect",
)
app = web.application(urls, globals())
class index:
def GET(self):
return "welcome"
class hello:
def GET(self, name):
name = name or 'world'
return "hello, " + name + '!'
class cookie:
def GET(self):
return ",".join(sorted(web.cookies().keys()))
class setcookie:
def GET(self):
i = web.input()
for k, v in i.items():
web.setcookie(k, v)
return "done"
class redirect:
def GET(self):
i = web.input(url='/')
raise web.seeother(i.url)
class BrowserTest(webtest.TestCase):
def testCookies(self):
b = app.browser()
b.open('http://0.0.0.0/setcookie?x=1&y=2')
b.open('http://0.0.0.0/cookie')
self.assertEquals(b.data, 'x,y')
def testNotfound(self):
b = app.browser()
b.open('http://0.0.0.0/notfound')
self.assertEquals(b.status, 404)
def testRedirect(self):
b = app.browser()
b.open('http://0.0.0.0:8080/redirect')
self.assertEquals(b.url, 'http://0.0.0.0:8080/')
b.open('http://0.0.0.0:8080/redirect?url=/hello/foo')
self.assertEquals(b.url, 'http://0.0.0.0:8080/hello/foo')
b.open('https://0.0.0.0:8080/redirect')
self.assertEquals(b.url, 'https://0.0.0.0:8080/')
b.open('https://0.0.0.0:8080/redirect?url=/hello/foo')
self.assertEquals(b.url, 'https://0.0.0.0:8080/hello/foo')
if __name__ == "__main__":
webtest.main()
==> webpy/test/db.py <==
"""DB test"""
from __future__ import print_function
import webtest
import web
from web.py3helpers import PY2
class DBTest(webtest.TestCase):
dbname = 'postgres'
driver = None
def setUp(self):
self.db = webtest.setup_database(self.dbname, driver=self.driver)
self.db.query("CREATE TABLE person (name text, email text, active boolean)")
def tearDown(self):
# there might be some error with the current connection, delete from a new connection
self.db = webtest.setup_database(self.dbname, driver=self.driver)
self.db.query('DROP TABLE person')
def _testable(self):
try:
webtest.setup_database(self.dbname, driver=self.driver)
print("Running tests for %s" % self.__class__.__name__, file=web.debug)
return True
except ImportError as e:
print(str(e), "(ignoring %s)" % self.__class__.__name__, file=web.debug)
return False
def testUnicode(self):
# Bug#177265: unicode queries throw errors
self.db.select('person', where='name=$name', vars={'name': u'\xf4'})
def assertRows(self, n):
result = self.db.select('person')
self.assertEquals(len(list(result)), n)
def testCommit(self):
t = self.db.transaction()
self.db.insert('person', False, name='user1')
t.commit()
t = self.db.transaction()
self.db.insert('person', False, name='user2')
self.db.insert('person', False, name='user3')
t.commit()
self.assertRows(3)
def testRollback(self):
t = self.db.transaction()
self.db.insert('person', False, name='user1')
self.db.insert('person', False, name='user2')
self.db.insert('person', False, name='user3')
t.rollback()
self.assertRows(0)
def testWrongQuery(self):
# It should be possible to run a correct query after getting an error from a wrong query.
try:
self.db.select('notthere')
except:
pass
self.db.select('person')
def testNestedTransactions(self):
t1 = self.db.transaction()
self.db.insert('person', False, name='user1')
self.assertRows(1)
t2 = self.db.transaction()
self.db.insert('person', False, name='user2')
self.assertRows(2)
t2.rollback()
self.assertRows(1)
t3 = self.db.transaction()
self.db.insert('person', False, name='user3')
self.assertRows(2)
t3.commit()
t1.commit()
self.assertRows(2)
def testPooling(self):
# can't test pooling if DBUtils is not installed
try:
import DBUtils
except ImportError:
return
db = webtest.setup_database(self.dbname, pooling=True)
self.assertEquals(db.ctx.db.__class__.__module__, 'DBUtils.PooledDB')
db.select('person', limit=1)
def test_multiple_insert(self):
db = webtest.setup_database(self.dbname)
db.multiple_insert('person', [dict(name='a'), dict(name='b')], seqname=False)
assert db.select("person", where="name='a'").list()
assert db.select("person", where="name='b'").list()
def test_result_is_unicode(self):
# TODO: not sure this test still has meaning with Py3
db = webtest.setup_database(self.dbname)
self.db.insert('person', False, name='user')
name = db.select('person')[0].name
self.assertEquals(type(name), unicode if PY2 else str)
def test_result_is_true(self):
db = webtest.setup_database(self.dbname)
self.db.insert('person', False, name='user')
self.assertEquals(bool(db.select('person')), True)
def testBoolean(self):
def t(active):
name ='name-%s' % active
self.db.insert('person', False, name=name, active=active)
a = self.db.select('person', where='name=$name', vars=locals())[0].active
self.assertEquals(a, active)
t(False)
t(True)
def test_insert_default_values(self):
db = webtest.setup_database(self.dbname)
db.insert("person")
def test_where(self):
db = webtest.setup_database(self.dbname)
db.insert("person", False, name="Foo")
d = db.where("person", name="Foo").list()
assert len(d) == 1
d = db.where("person").list()
assert len(d) == 1
class PostgresTest(DBTest):
dbname = "postgres"
driver = "psycopg2"
class PostgresTest_psycopg(PostgresTest):
driver = "psycopg"
class PostgresTest_pgdb(PostgresTest):
driver = "pgdb"
class SqliteTest(DBTest):
dbname = "sqlite"
driver = "sqlite3"
def testNestedTransactions(self):
# nested transactions do not work with sqlite
pass
def testPooling(self):
# pooling is not supported for sqlite
pass
class SqliteTest_pysqlite2(SqliteTest):
driver = "pysqlite2.dbapi2"
class MySQLTest_MySQLdb(DBTest):
dbname = "mysql"
driver = "MySQLdb"
def setUp(self):
self.db = webtest.setup_database(self.dbname)
# In mysql, transactions are supported only with INNODB engine.
self.db.query("CREATE TABLE person (name text, email text) ENGINE=INNODB")
def testBoolean(self):
# boolean datatype is not supported in MySQL (at least until v5.0)
pass
class MySQLTest_PyMySQL(MySQLTest_MySQLdb):
driver="pymysql"
class MySQLTest_MySQLConnector(MySQLTest_MySQLdb):
driver="mysql.connector"
del DBTest
def is_test(cls):
import inspect
return inspect.isclass(cls) and webtest.TestCase in inspect.getmro(cls)
# ignore db tests when the required db adapter is not found.
for t in list(globals().values()):
if is_test(t) and not t('_testable')._testable():
del globals()[t.__name__]
del t
try:
import DBUtils
except ImportError as e:
print(str(e) + "(ignoring testPooling)", file=web.debug)
if __name__ == '__main__':
webtest.main()
==> webpy/test/requirements.txt <==
psycopg2
PyMySQL
mysql-connector==2.1.4
DBUtils
==> webpy/test/session.py <==
import webtest
import web
import tempfile
class SessionTest(webtest.TestCase):
def setUp(self):
app = web.auto_application()
session = self.make_session(app)
class count(app.page):
def GET(self):
session.count += 1
return str(session.count)
class reset(app.page):
def GET(self):
session.kill()
return ""
class redirect(app.page):
def GET(self):
session.request_token = '123'
raise web.redirect('/count')
class get_session(app.page):
path = "/session/(.*)"
def GET(self, name):
return session[name]
self.app = app
self.session = session
def make_session(self, app):
dir = tempfile.mkdtemp()
store = web.session.DiskStore(tempfile.mkdtemp())
return web.session.Session(app, store, {'count': 0})
def testSession(self):
b = self.app.browser()
self.assertEquals(b.open('/count').read(), b'1')
self.assertEquals(b.open('/count').read(), b'2')
self.assertEquals(b.open('/count').read(), b'3')
b.open('/reset')
self.assertEquals(b.open('/count').read(), b'1')
def testParallelSessions(self):
b1 = self.app.browser()
b2 = self.app.browser()
b1.open('/count')
for i in range(1, 10):
self.assertEquals(b1.open('/count').read(), str(i+1).encode('utf8'))
self.assertEquals(b2.open('/count').read(), str(i).encode('utf8'))
def testBadSessionId(self):
b = self.app.browser()
self.assertEquals(b.open('/count').read(), b'1')
self.assertEquals(b.open('/count').read(), b'2')
cookie = b.cookiejar._cookies['0.0.0.0']['/']['webpy_session_id']
cookie.value = '/etc/password'
self.assertEquals(b.open('/count').read(), b'1')
def testRedirect(self):
b = self.app.browser()
b.open("/redirect")
b.open("/session/request_token")
self.assertEquals(b.data, b'123')
class DBSessionTest(SessionTest):
"""Session test with db store."""
def make_session(self, app):
db = webtest.setup_database("sqlite", "sqlite3")
#db.printing = True
db.query(""
+ "CREATE TABLE session ("
+ " session_id char(128) unique not null,"
+ " atime timestamp default (datetime('now','utc')),"
+ " data text)"
)
store = web.session.DBStore(db, 'session')
return web.session.Session(app, store, {'count': 0})
def tearDown(self):
# there might be some error with the current connection, delete from a new connection
self.db = webtest.setup_database("sqlite","sqlite3")
self.db.query('DROP TABLE session')
if __name__ == "__main__":
webtest.main()
==> webpy/test/template.py <==
import webtest
import web
from web.template import SecurityError, Template
from web.py3helpers import PY2
class TestResult:
def __init__(self, t):
self.t = t
def __getattr__(self, name):
return getattr(self.t, name)
def __repr__(self):
return repr(unicode(self.t) if PY2 else str(self.t))
def t(code, **keywords):
tmpl = Template(code, **keywords)
return lambda *a, **kw: TestResult(tmpl(*a, **kw))
class TemplateTest(webtest.TestCase):
"""Tests for the template security feature."""
def testPrint(self):
if PY2:
tpl = "$code:\n print 'blah'"
#print_function has been imported from __future__ so the print statement doesn't exist anymore
self.assertRaises(SyntaxError, t, tpl)
else:
tpl = "$code:\n print('blah')"
self.assertRaises(NameError, t(tpl))
def testRepr(self):
if PY2: #this feature doesn't exist in Py3 anymore
tpl = "$code:\n `1`"
self.assertRaises(SecurityError, t, tpl)
def testAttr(self):
tpl = '$code:\n (lambda x: x+1).func_code'
self.assertRaises(SecurityError, t, tpl)
tpl = '$def with (a)\n$code:\n a.b = 3'
self.assertRaises(SecurityError, t, tpl)
#these two should execute themselves flawlessly
t("$code:\n foo = {'a': 1}.items()")()
if not PY2:
t("$code:\n bar = {k:0 for k in [1,2,3]}")()
if __name__ == "__main__":
webtest.main()
==> webpy/test/application.py <==
import webtest
import time
import threading
import web
import urllib
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
from web.py3helpers import PY2
data = """
import web
urls = ("/", "%(classname)s")
app = web.application(urls, globals(), autoreload=True)
class %(classname)s:
def GET(self):
return "%(output)s"
"""
urls = (
"/iter", "do_iter",
)
app = web.application(urls, globals())
class do_iter:
def GET(self):
yield 'hello, '
yield web.input(name='world').name
POST = GET
def write(filename, data):
f = open(filename, 'w')
f.write(data)
f.close()
class ApplicationTest(webtest.TestCase):
def test_reloader(self):
write('foo.py', data % dict(classname='a', output='a'))
import foo
app = foo.app
self.assertEquals(app.request('/').data, b'a')
# test class change
time.sleep(1)
write('foo.py', data % dict(classname='a', output='b'))
self.assertEquals(app.request('/').data, b'b')
# test urls change
time.sleep(1)
write('foo.py', data % dict(classname='c', output='c'))
self.assertEquals(app.request('/').data, b'c')
def testUppercaseMethods(self):
urls = ("/", "hello")
app = web.application(urls, locals())
class hello:
def GET(self): return "hello"
def internal(self): return "secret"
response = app.request('/', method='internal')
self.assertEquals(response.status, '405 Method Not Allowed')
def testRedirect(self):
urls = (
"/a", "redirect /hello/",
"/b/(.*)", r"redirect /hello/\1",
"/hello/(.*)", "hello"
)
app = web.application(urls, locals())
class hello:
def GET(self, name):
name = name or 'world'
return "hello " + name
response = app.request('/a')
self.assertEquals(response.status, '301 Moved Permanently')
self.assertEquals(response.headers['Location'], 'http://0.0.0.0:8080/hello/')
response = app.request('/a?x=2')
self.assertEquals(response.status, '301 Moved Permanently')
self.assertEquals(response.headers['Location'], 'http://0.0.0.0:8080/hello/?x=2')
response = app.request('/b/foo?x=2')
self.assertEquals(response.status, '301 Moved Permanently')
self.assertEquals(response.headers['Location'], 'http://0.0.0.0:8080/hello/foo?x=2')
def test_routing(self):
urls = (
"/foo", "foo"
)
class foo:
def GET(self):
return "foo"
app = web.application(urls, {"foo": foo})
self.assertEquals(app.request('/foo\n').data, b'not found')
self.assertEquals(app.request('/foo').data, b'foo')
def test_subdirs(self):
urls = (
"/(.*)", "blog"
)
class blog:
def GET(self, path):
return "blog " + path
app_blog = web.application(urls, locals())
urls = (
"/blog", app_blog,
"/(.*)", "index"
)
class index:
def GET(self, path):
return "hello " + path
app = web.application(urls, locals())
self.assertEquals(app.request('/blog/foo').data, b'blog foo')
self.assertEquals(app.request('/foo').data, b'hello foo')
def processor(handler):
return web.ctx.path + ":" + handler()
app.add_processor(processor)
self.assertEquals(app.request('/blog/foo').data, b'/blog/foo:blog foo')
def test_subdomains(self):
def create_app(name):
urls = ("/", "index")
class index:
def GET(self):
return name
return web.application(urls, locals())
urls = (
"a.example.com", create_app('a'),
"b.example.com", create_app('b'),
".*.example.com", create_app('*')
)
app = web.subdomain_application(urls, locals())
def test(host, expected_result):
result = app.request('/', host=host)
self.assertEquals(result.data, expected_result)
test('a.example.com', b'a')
test('b.example.com', b'b')
test('c.example.com', b'*')
test('d.example.com', b'*')
def test_redirect(self):
urls = (
"/(.*)", "blog"
)
class blog:
def GET(self, path):
if path == 'foo':
raise web.seeother('/login', absolute=True)
else:
raise web.seeother('/bar')
app_blog = web.application(urls, locals())
urls = (
"/blog", app_blog,
"/(.*)", "index"
)
class index:
def GET(self, path):
return "hello " + path
app = web.application(urls, locals())
response = app.request('/blog/foo')
self.assertEquals(response.headers['Location'], 'http://0.0.0.0:8080/login')
response = app.request('/blog/foo', env={'SCRIPT_NAME': '/x'})
self.assertEquals(response.headers['Location'], 'http://0.0.0.0:8080/x/login')
response = app.request('/blog/foo2')
self.assertEquals(response.headers['Location'], 'http://0.0.0.0:8080/blog/bar')
response = app.request('/blog/foo2', env={'SCRIPT_NAME': '/x'})
self.assertEquals(response.headers['Location'], 'http://0.0.0.0:8080/x/blog/bar')
def test_processors(self):
urls = (
"/(.*)", "blog"
)
class blog:
def GET(self, path):
return 'blog ' + path
state = web.storage(x=0, y=0)
def f():
state.x += 1
app_blog = web.application(urls, locals())
app_blog.add_processor(web.loadhook(f))
urls = (
"/blog", app_blog,
"/(.*)", "index"
)
class index:
def GET(self, path):
return "hello " + path
app = web.application(urls, locals())
def g():
state.y += 1
app.add_processor(web.loadhook(g))
app.request('/blog/foo')
assert state.x == 1 and state.y == 1, repr(state)
app.request('/foo')
assert state.x == 1 and state.y == 2, repr(state)
def testUnicodeInput(self):
urls = (
"(/.*)", "foo"
)
class foo:
def GET(self, path):
i = web.input(name='')
return repr(i.name)
def POST(self, path):
if path == '/multipart':
i = web.input(file={})
return i.file.value
else:
i = web.input()
return repr(dict(i)).replace('u','')
app = web.application(urls, locals())
def f(name):
path = '/?' + urlencode({"name": name.encode('utf-8')})
self.assertEquals(app.request(path).data.decode('utf-8'), repr(name))
f(u'\u1234')
f(u'foo')
response = app.request('/', method='POST', data=dict(name='foo'))
self.assertEquals(response.data, b"{'name': 'foo'}")
data = '--boundary\r\nContent-Disposition: form-data; name="x"\r\n\r\nfoo\r\n--boundary\r\nContent-Disposition: form-data; name="file"; filename="a.txt"\r\nContent-Type: text/plain\r\n\r\na\r\n--boundary--\r\n'
headers = {'Content-Type': 'multipart/form-data; boundary=boundary'}
response = app.request('/multipart', method="POST", data=data, headers=headers)
self.assertEquals(response.data, b'a')
def testCustomNotFound(self):
urls_a = ("/", "a")
urls_b = ("/", "b")
app_a = web.application(urls_a, locals())
app_b = web.application(urls_b, locals())
app_a.notfound = lambda: web.HTTPError("404 Not Found", {}, "not found 1")
urls = (
"/a", app_a,
"/b", app_b
)
app = web.application(urls, locals())
def assert_notfound(path, message):
response = app.request(path)
self.assertEquals(response.status.split()[0], "404")
self.assertEquals(response.data, message)
assert_notfound("/a/foo", b"not found 1")
assert_notfound("/b/foo", b"not found")
app.notfound = lambda: web.HTTPError("404 Not Found", {}, "not found 2")
assert_notfound("/a/foo", b"not found 1")
assert_notfound("/b/foo", b"not found 2")
def testIter(self):
self.assertEquals(app.request('/iter').data, b'hello, world')
self.assertEquals(app.request('/iter?name=web').data, b'hello, web')
self.assertEquals(app.request('/iter', method='POST').data, b'hello, world')
self.assertEquals(app.request('/iter', method='POST', data='name=web').data, b'hello, web')
def testUnload(self):
x = web.storage(a=0)
urls = (
"/foo", "foo",
"/bar", "bar"
)
class foo:
def GET(self):
return "foo"
class bar:
def GET(self):
raise web.notfound()
app = web.application(urls, locals())
def unload():
x.a += 1
app.add_processor(web.unloadhook(unload))
app.request('/foo')
self.assertEquals(x.a, 1)
app.request('/bar')
self.assertEquals(x.a, 2)
def test_changequery(self):
urls = (
'/', 'index',
)
class index:
def GET(self):
return web.changequery(x=1)
app = web.application(urls, locals())
def f(path):
return app.request(path).data
self.assertEquals(f('/?x=2'), b'/?x=1')
p = f('/?y=1&y=2&x=2')
self.assertTrue(p == b'/?y=1&y=2&x=1' or p == b'/?x=1&y=1&y=2')
def test_setcookie(self):
urls = (
'/', 'index',
)
class index:
def GET(self):
web.setcookie("foo", "bar")
return "hello"
app = web.application(urls, locals())
def f(script_name=""):
response = app.request("/", env={"SCRIPT_NAME": script_name})
return response.headers['Set-Cookie']
self.assertEquals(f(''), 'foo=bar; Path=/')
self.assertEquals(f('/admin'), 'foo=bar; Path=/admin/')
def test_stopsimpleserver(self):
urls = (
'/', 'index',
)
class index:
def GET(self):
pass
app = web.application(urls, locals())
thread = threading.Thread(target=app.run)
thread.start()
time.sleep(1)
self.assertTrue(thread.isAlive())
app.stop()
thread.join(timeout=1)
self.assertFalse(thread.isAlive())
if __name__ == '__main__':
webtest.main()
==> webpy/setup.py <==
#!/usr/bin/env python

from setuptools import setup
from web import __version__

setup(name='web.py',
      version=__version__,
      description='web.py: makes web apps',
      author='Aaron Swartz',
      author_email='me@aaronsw.com',
      maintainer='Anand Chitipothu',
      maintainer_email='anandology@gmail.com',
      url='http://webpy.org/',
      packages=['web', 'web.wsgiserver', 'web.contrib'],
      long_description="Think about the ideal way to write a web app. Write the code to make it happen.",
      license="Public domain",
      platforms=["any"],
     )
==> webpy/docs/db.rst <==
Accessing the database
======================
Web.py provides a simple and uniform interface to the database that you want to work with, whether it is PostgreSQL, MySQL, SQLite or any other. It doesn't try to build layers between you and your database. Rather, it tries to make it easy to perform common tasks, and get out of your way when you need to do more advanced things.
Create database object
------------------------
The first step in working with databases from web.py is to create a
database object with `web.database()`. It returns a database object, which has convenient methods for you to use.
Make sure that you have the appropriate database library installed (`psycopg2` for PostgreSQL, `MySQLdb` for MySQL, `sqlite3` for SQLite).
::
db = web.database(dbn='postgres', db='dbname', user='username', pw='password')
The `dbn` value for MySQL is `mysql` and for SQLite is `sqlite`. SQLite doesn't take the `user` and `pw` parameters.
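
For example, connecting to SQLite only needs the database file (a minimal sketch; the file name `mydata.db` is illustrative)::

    db = web.database(dbn='sqlite', db='mydata.db')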
Multiple databases
``````````````````
Working with multiple databases is not at all difficult with web.py. Here's what you do.
::
db1 = web.database(dbn='postgres', db='dbname1', user='username1', pw='password2')
db2 = web.database(dbn='postgres', db='dbname2', user='username2', pw='password2')
And use `db1`, `db2` to access those databases respectively.
Operations
----------
`web.database()` returns an object which provides all the functionality to insert, select, update and delete data from your database. For each of the methods on `db` below, you can pass `_test=True` to see the SQL statement rather than executing it.
Inserting
`````````
::
# Insert an entry into table 'user'
userid = db.insert('user', firstname="Bob", lastname="Smith", joindate=web.SQLLiteral("NOW()"))
The first argument is the table name and the rest are named arguments which represent the fields in the table. If values are not given, the database may create default values or issue a warning.
For bulk insertion rather than inserting record by record, use `multiple_insert` instead (see `Multiple Inserts` below).
Selecting
`````````
The `select` method is used for selecting rows from the database. It returns a `web.iterbetter` object, which can be looped through.
To select `all` the rows from the `user` table, you would simply do
::
users = db.select('user')
For real-world use cases, the `select` method takes `vars`, `what`, `where`, `order`, `group`, `limit`, `offset`, and `_test` optional parameters.
::
users = db.select('users', where="id>100")
To prevent SQL injection attacks, you can use `$key` in the where clause and pass `vars` as a dictionary like `{'key': value}`.
::
vars = dict(name="Bob")
results = db.select('users', where="name = $name", vars=vars, _test=True)
>>> results
<sql: "SELECT * FROM users WHERE name = 'Bob'">
Updating
````````
The `update` method accepts the same kind of arguments as `select`. It returns the number of rows updated.
::
num_updated = db.update('users', where="id = 10", firstname = "Foo")
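
As with `select`, it is safer to pass untrusted values through `vars` (a sketch; the column values are illustrative)::

    num_updated = db.update('users', where="id = $id", vars={'id': 10}, firstname="Foo")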
Deleting
````````
The `delete` method returns the number of rows deleted. It also accepts "using" and "vars" parameters. See ``Selecting`` for more details on `vars`.
::
num_deleted = db.delete('users', where="id=10")
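
Here too, `$`-placeholders with `vars` keep values out of the SQL string (a sketch)::

    num_deleted = db.delete('users', where="id = $id", vars={'id': 10})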
Multiple Inserts
````````````````
The `multiple_insert` method on the `db` object allows bulk insertion. All that's needed is to prepare a list of dictionaries, one for each row to be inserted, each with the same set of keys, and pass it to `multiple_insert` along with the table name. It returns the list of ids of the inserted rows.
The value of `db.supports_multiple_insert` tells you if your database supports multiple inserts.
::
values = [{"name": "foo", "email": "foo@example.com"}, {"name": "bar", "email": "bar@example.com"}]
db.multiple_insert('person', values=values)
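
Since not every engine supports it, a guarded call might look like this (a sketch that falls back to row-by-row inserts)::

    if db.supports_multiple_insert:
        db.multiple_insert('person', values=values)
    else:
        for row in values:
            db.insert('person', **row)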
Advanced querying
`````````````````
Often there is more to do with the database than the simple operations offered by `insert`, `select`, `delete` and `update` - things like your favorite (or scary) joins, counts etc. All these are possible with the `query` method, which also takes `vars`.
::
results = db.query("SELECT COUNT(*) AS total_users FROM users")
print results[0].total_users # prints number of entries in 'users' table
Joining tables
::
results = db.query("SELECT * FROM entries JOIN users WHERE entries.author_id = users.id")
Transactions
````````````
The database object has a method `transaction` which starts a new transaction and returns the transaction object. The transaction object can be used to commit or rollback that transaction. It is also possible to have nested transactions.
From Python 2.5 onwards, which supports the `with` statement, you can do
::
with db.transaction():
userid = db.insert('users', name='foo')
authorid = db.insert('authors', userid=userid)
For earlier versions of Python, you can do
::
t = db.transaction()
try:
userid = db.insert('users', name='foo')
authorid = db.insert('authors', userid=userid)
except:
t.rollback()
raise
else:
t.commit()
==> webpy/docs/templating.rst <==
Templating
==========
There are almost as many Python templating systems as there are web
frameworks (and, indeed, it seems like many templating systems are
adopting web framework-like features). The following are the goals of `templetor`, the templating system of web.py.
1. The templating system has to *look* decent. No ``<%#foo#%>`` crud.
2. Reuse Python terms and semantics as much as possible.
3. Expressive enough to do real computation.
4. Usable for any text language, not just HTML and XML.
And requirements for the implementation as well:
5. Sandboxable so that you can let untrusted users write templates.
6. Simple and fast implementation.
So here it is.
Variable substitution
---------------------
::
Look, a $string.
Hark, an ${arbitrary + expression}.
Gawk, a $dictionary[key].function('argument').
Cool, a $(limit)ing.
Stop, \$money isn't evaluated.
We use basically the same semantics as (rejected) `PEP
215 <https://peps.python.org/pep-0215/>`__. Variables can go
anywhere in a document.
Newline suppression
-------------------
::
If you put a backslash \
at the end of a line \
(like these) \
then there will be no newline.
renders as all one line.
Expressions
-----------
::
Here are some expressions:
$for var in iterator: I like $var!
$if times > max:
Stop! In the name of love.
$else:
Keep on, you can do it.
That's all, folks.
All your old Python friends are here: ``if``, ``while``, ``for``,
``else``, ``break``, ``continue``, and ``pass`` also act as you'd
expect. (Obviously, you can't have variables named any of these.) The
Python code starts at the ``$`` and ends at the ``:``. The ``$`` has to
be at the beginning of the line, but that's not such a burden because of
newline suppression (above).
Also, we're very careful about spacing -- all the lines will render with
no spaces at the beginning. (Open question: what if you want spaces at
the beginning?) Also, a trailing space might break your code.
There are a couple changes from Python: ``for`` and ``while`` now take
an ``else`` clause that gets called if the loop is never evaluated.
(Possible feature to add: Django-style for loop variables.)
Comments
--------
::
$# Here's where we hoodwink the folks at home:
Please enter in your deets:
CC: [ ] $#this is the important one
SSN: $#Social Security Number#$ [ ]
Comments start with ``$#`` and go to ``#$`` or the end of the line,
whichever is first.
Code
----
**NOTE: This feature has not been implemented in the current web.py
implementation of templetor.**
::
Sometimes you just need to break out the Python.
$ mapping = {
$ 'cool': ['nice', 'sweet', 'hot'],
$ 'suck': ['bad', 'evil', 'awful']
$ }
Isn't that $mapping[thought]?
That's$ del mapping $ fine with me.
$ complicatedfunc()
$ for x in bugs:
$ if bug.level == 'severe':
Ooh, this one is bad.
$ continue
And there's $x...
**Body of loops have to be indented with exactly 4 spaces.**
Code begins with a ``$`` and a space and goes until the next ``$`` or
the end of the line, whichever comes first. Nothing ever gets output if
the first character after the ``$`` is a space (so ``complicatedfunc``
above doesn't write anything to the screen like it might without the
space).
Python integration
------------------
A template begins with a line like this:
::
$def with (name, title, company='BigCo')
which declares that the template takes those arguments. (The ``with``
keyword is special, like ``def`` or ``if``.)
**Don't forget to put spaces in the definition.**
The following *will not work* (note the missing spaces after the commas):
::
$def with (name,title,company='BigCo')
Inside Python, the template looks like a function that takes these
arguments. It returns a storage object with the special property that
evaluating it as a string returns the value of the body of the template.
The elements in the storage object are the results of the ``def``\ s and
the ``set``\ s.
Perhaps an example will make this clearer. Here's a template, "entry":
::
$def with (post)
$var title: $post.title
$markdown(post.body)
by $post.author
Here's another, "base":
::
$def with (self)
<html>
  <head><title>$self.title</title></head>
  <body>
    <h1>$self.title</h1>
    $:self
  </body>
</html>
Now let's say we compile both from within Python, the first as
``entry``, the second as ``base``. Here's how we might use them:
::
print base( entry( post ) )
``entry`` takes the argument post and returns an object whose string
value is a bit of HTML showing the post with its title in the property
``title``. ``base`` takes this object and places the title in the
appropriate place and displays the page itself in the body of the page.
The Python code prints out the result.
*Where did ``markdown`` come from? It wasn't passed as an argument.* You
can pass a list of functions and variables to the template compiler to
be made globally available to templates. *Why ``$:self``?* See below.
Here's an example:
::
import template
render = template.render('templates/')
template.Template.globals['len'] = len
print render.base(render.message('Hello, world!'))
The first line imports templetor. The second says that our templates are
in the directory ``templates/``. The third gives all our templates access
to the ``len`` function. The fourth grabs the template ``message.html``,
passes it the argument ``'Hello, world!'``, passes the result of
rendering it to the template ``base.html``, and prints the result. (If your templates don't
end in ``.html`` or ``.xml``, templetor will still find them, but it
won't do its automatic HTML-encoding.)
Turning Off Filter
------------------
By default ``template.render`` will use the ``web.websafe`` filter to do
HTML-encoding. To turn it off, put a ``:`` after the ``$``, as in:
::
$:form.render()
Output from form.render() will be displayed as is.
::
$:fooBar $# fooBar = lorem ipsum
Output from the variable in the template will be displayed as is.
Including / nesting templates
-----------------------------
If you want to nest one template within another, you nest the
``render()`` calls, and then include the variable (unfiltered) in the
page. In your handler:
::
print render.foo(render.bar())
or (to make things a little more clear):
::
barhtml = render.bar()
print render.foo(barhtml)
Then in the template ``foo.html``:
::
$def with (bar)
html goes here
$:bar
more html
This replaces ``$:bar`` with the output of the ``render.bar()`` call,
which is why it must be ``$:``/unfiltered, so that you get un-encoded
HTML (unless you want something else, of course). You can pass variables
in the same way:
::
print render.foo(render.bar(baz), qux)
In the template bar (``bar.html``):
::
$def with (baz)
bar stuff goes here + baz
In template foo (``foo.html``):
::
$def with (bar, qux)
html goes here
$:bar
Value of qux is $qux
Escaping
--------
web.py automatically escapes any variables used in templates, so that if for some reason ``name`` is set to a value containing some HTML, it will get properly escaped and appear as plain text. If you want to turn this off, write ``$:name`` instead of ``$name``.
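As a minimal sketch (assuming a template that takes a ``name`` argument):
::
    $def with (name)
    Escaped: $name
    Unescaped: $:name

If ``name`` is ``'<b>Bob</b>'``, the first line renders the markup escaped as plain text, while the second line emits the HTML as is.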
webpy/docs/index.rst 0000644 0001750 0001750 00000002030 13146625266 013137 0 ustar wmb wmb .. web.py documentation master file, created by
sphinx-quickstart on Sun Oct 27 15:35:05 2013.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to web.py's documentation!
==================================
Contents:
.. toctree::
:maxdepth: 3
urlmapping
input
db
templating
deploying
api
Getting Started
===============
Building webapps with web.py is easy. To get started, save the following code as, say, `hello.py` and run it with `python hello.py`. Now point your browser to `http://localhost:8080/`, which will respond with 'Hello, world!'. Hey, you're done with your first web.py program - with just 8 lines of code!
::
import web
urls = ("/.*", "hello")
app = web.application(urls, globals())
class hello:
def GET(self):
return 'Hello, world!'
if __name__ == "__main__":
app.run()
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
webpy/docs/api.rst 0000644 0001750 0001750 00000001126 13146625266 012606 0 ustar wmb wmb web.py API
==========
web.application
---------------
.. automodule:: web.application
:members:
web.db
------
.. automodule:: web.db
:members:
web.net
-------
.. automodule:: web.net
:members:
web.form
--------
.. automodule:: web.form
:members:
web.http
--------
.. automodule:: web.http
:members:
web.session
-----------
.. automodule:: web.session
:members:
web.template
------------
.. automodule:: web.template
:members:
web.utils
---------
.. automodule:: web.utils
:members:
web.webapi
----------
.. automodule:: web.webapi
:members:
webpy/docs/Makefile 0000644 0001750 0001750 00000015146 13146625266 012752 0 ustar wmb wmb # Makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = _build
# User-friendly check for sphinx-build
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
endif
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
help:
@echo "Please use \`make ' where is one of"
@echo " html to make standalone HTML files"
@echo " dirhtml to make HTML files named index.html in directories"
@echo " singlehtml to make a single large HTML file"
@echo " pickle to make pickle files"
@echo " json to make JSON files"
@echo " htmlhelp to make HTML files and a HTML help project"
@echo " qthelp to make HTML files and a qthelp project"
@echo " devhelp to make HTML files and a Devhelp project"
@echo " epub to make an epub"
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
@echo " latexpdf to make LaTeX files and run them through pdflatex"
@echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
@echo " text to make text files"
@echo " man to make manual pages"
@echo " texinfo to make Texinfo files"
@echo " info to make Texinfo files and run them through makeinfo"
@echo " gettext to make PO message catalogs"
@echo " changes to make an overview of all changed/added/deprecated items"
@echo " xml to make Docutils-native XML files"
@echo " pseudoxml to make pseudoxml-XML files for display purposes"
@echo " linkcheck to check all external links for integrity"
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
clean:
rm -rf $(BUILDDIR)/*
html:
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
dirhtml:
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
singlehtml:
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
@echo
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
pickle:
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
@echo
@echo "Build finished; now you can process the pickle files."
json:
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
@echo
@echo "Build finished; now you can process the JSON files."
htmlhelp:
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
@echo
@echo "Build finished; now you can run HTML Help Workshop with the" \
".hhp project file in $(BUILDDIR)/htmlhelp."
qthelp:
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
@echo
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/webpy.qhcp"
@echo "To view the help file:"
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/webpy.qhc"
devhelp:
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
@echo
@echo "Build finished."
@echo "To view the help file:"
@echo "# mkdir -p $$HOME/.local/share/devhelp/webpy"
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/webpy"
@echo "# devhelp"
epub:
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
@echo
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
latex:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
@echo "Run \`make' in that directory to run these through (pdf)latex" \
"(use \`make latexpdf' here to do that automatically)."
latexpdf:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through pdflatex..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
latexpdfja:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through platex and dvipdfmx..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
text:
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
@echo
@echo "Build finished. The text files are in $(BUILDDIR)/text."
man:
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
@echo
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
texinfo:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo
@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
@echo "Run \`make' in that directory to run these through makeinfo" \
"(use \`make info' here to do that automatically)."
info:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo "Running Texinfo files through makeinfo..."
make -C $(BUILDDIR)/texinfo info
@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
gettext:
$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
@echo
@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
changes:
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
@echo
@echo "The overview file is in $(BUILDDIR)/changes."
linkcheck:
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
@echo
@echo "Link check complete; look for any errors in the above output " \
"or in $(BUILDDIR)/linkcheck/output.txt."
doctest:
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
@echo "Testing of doctests in the sources finished, look at the " \
"results in $(BUILDDIR)/doctest/output.txt."
xml:
$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
@echo
@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
pseudoxml:
$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
@echo
@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
webpy/docs/input.rst 0000644 0001750 0001750 00000005715 13146625266 013204 0 ustar wmb wmb Accessing User Input
====================
While building web applications, one basic and important task is responding to the user input that is sent to the server.
Web.py makes it easy to access that input, whether it comes as parameters in the URL (a `GET` request) or as form data (a `POST` or `PUT` request). The `web.input()` method returns a dictionary-like object (more specifically a `web.storage` object) that contains the user input, whatever the request method is.
To access the URL parameters (?key=value) from the `web.input` object, just use `web.input().key`.
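As a small sketch (the parameter name is illustrative), the returned storage object supports both attribute-style and dictionary-style access:
::
    data = web.input()
    name = data.name     # attribute-style access
    name = data['name']  # dictionary-style access works too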
GET
---
For a URL which looks like `/page?id=1&action=edit`, you do
::
class Page(object):
def GET(self):
data = web.input()
id = int(data.id) # all inputs arrive as strings; cast to int to get an integer.
action = data.action
...
A `KeyError` exception is raised if `key` is not present in the URL parameters.
Web.py makes it easy to handle that by passing default values to `web.input()`.
::
class Page(object):
def GET(self):
data = web.input(id=1, action='read')
id, action = int(data.id), data.action
...
POST
----
It works exactly the same way with the POST method. If you have a form with `name` and `password` elements, you would do
::
class Login(object):
def POST(self):
data = web.input()
name, password = data.name, data.password
...
Multiple inputs with same name
------------------------------
What if you have a URL which looks like `/page?id=1&id=2&id=3`, or you have a form with multiple selects? What would `web.input().id` give us? It simply swallows all but one value. Letting `web.input()` know that we're expecting more values with the same name is simple: just pass `[]` as the default argument for that name.
::
class Page(object):
def GET(self):
data = web.input(id=[])
ids = data.id # now, `ids` is a list with all the `id`s.
...
File uploads
------------
Uploading files is easy with web.py. `web.input()` takes care of that too. Just make sure that the upload form has the attribute enctype="multipart/form-data". `web.input()` then gives you `filename` and `value`, which are the uploaded file's name and its contents, respectively.
To make things simpler, it also gives you `file`, a file-like object if you pass `myfile={}` where `myfile` is the name of the input element in your form.
::
class Upload(object):
def GET(self):
return render.upload()
def POST(self):
data = web.input(myfile={})
fp = data.myfile
save(fp) # fp.filename, fp.read() gives name and contents of the file
...
or
::
class Upload(object):
...
def POST(self):
data = web.input() # notice that `myfile={}` is missing here.
fp = data.myfile
save(fp.filename, fp.value)
...
webpy/docs/deploying.rst 0000644 0001750 0001750 00000006023 13146625266 014030 0 ustar wmb wmb Deploying web.py applications
=============================
FastCGI
-------
web.py uses the `flup`_ library for FastCGI support. Make sure it is installed.
.. _flup: http://trac.saddi.com/flup
You just need to make sure your application file is executable. Make it so by adding the following line to tell the system to execute it using Python::
#! /usr/bin/env python
and setting the executable flag on the file::
chmod +x /path/to/yourapp.py
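A minimal sketch of such an application file (the file name and URL mapping are illustrative; the shebang line is the one shown above, and web.py picks up the FastCGI environment automatically when the web server spawns it)::

    #! /usr/bin/env python
    import web

    urls = ("/.*", "hello")
    app = web.application(urls, globals())

    class hello:
        def GET(self):
            return 'Hello, world!'

    if __name__ == "__main__":
        app.run()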
Configuring lighttpd
^^^^^^^^^^^^^^^^^^^^
Here is a sample lighttpd configuration file to expose a web.py app using fastcgi. ::
# Enable mod_fastcgi and mod_rewrite modules
server.modules += ( "mod_fastcgi" )
server.modules += ( "mod_rewrite" )
# configure the application
fastcgi.server = ( "/yourapp.py" =>
((
# path to the socket file
"socket" => "/tmp/yourapp-fastcgi.socket",
# path to the application
"bin-path" => "/path/to/yourapp.py",
# number of fastcgi processes to start
"max-procs" => 1,
"bin-environment" => (
"REAL_SCRIPT_NAME" => ""
),
"check-local" => "disable"
))
)
url.rewrite-once = (
# favicon is usually placed in static/
"^/favicon.ico$" => "/static/favicon.ico",
# Let lighttpd serve resources from /static/.
# The web.py dev server automatically serves /static/, but this is
# required when deploying in production.
"^/static/(.*)$" => "/static/$1",
# everything else should go to the application, which is already configured above.
"^/(.*)$" => "/yourapp.py/$1",
)
With this configuration, lighttpd takes care of starting the application. The webserver talks to your application using FastCGI via a Unix domain socket. This means both the webserver and the application run on the same machine.
nginx + Gunicorn
----------------
Gunicorn 'Green Unicorn' is a Python WSGI HTTP Server for UNIX. It's a pre-fork worker model ported from Ruby's Unicorn project.
To make a web.py application work with gunicorn, you'll need to get the wsgi app from the web.py application object. ::
import web
...
app = web.application(urls, globals())
# get the wsgi app from web.py application object
wsgiapp = app.wsgifunc()
Once that change is made, the gunicorn server can be started using::
gunicorn -w 4 -b 127.0.0.1:4000 yourapp:wsgiapp
This starts gunicorn with 4 workers, listening on port 4000 on localhost.
It is best to run Gunicorn behind an HTTP proxy server; the Gunicorn team strongly advises using nginx.
Here is a sample nginx configuration which proxies to the application running on `127.0.0.1:4000`. ::
server {
listen 80;
server_name example.org;
access_log /var/log/nginx/example.log;
location / {
proxy_pass http://127.0.0.1:4000;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
} webpy/docs/conf.py 0000644 0001750 0001750 00000017714 13146625266 012614 0 ustar wmb wmb # -*- coding: utf-8 -*-
#
# web.py documentation build configuration file, created by
# sphinx-quickstart on Sun Oct 27 15:35:05 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'web.py'
copyright = u''
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.37'
# The full version, including alpha/beta/rc tags.
release = '0.37'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# " v documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'webpydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'webpy.tex', u'web.py Documentation',
u'Anand Chitipothu', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'webpy', u'web.py Documentation',
[u'Anand Chitipothu'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'webpy', u'web.py Documentation',
u'Anand Chitipothu', 'webpy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
webpy/tools/ 0000755 0001750 0001750 00000000000 13146625266 011513 5 ustar wmb wmb webpy/tools/_makedoc.py 0000644 0001750 0001750 00000005060 13146625266 013630 0 ustar wmb wmb import os
import web
class Parser:
def __init__(self):
self.mode = 'normal'
self.text = ''
def go(self, pyfile):
for line in file(pyfile):
if self.mode == 'in def':
self.text += ' ' + line.strip()
if line.strip().endswith(':'):
if self.definition(self.text):
self.text = ''
self.mode = 'in func'
else:
self.text = ''
self.mode = 'normal'
elif self.mode == 'in func':
if '"""' in line:
self.text += line.strip().strip('"')
self.mode = 'in doc'
if line.count('"""') == 2:
self.mode = 'normal'
self.docstring(self.text)
self.text = ''
else:
self.mode = 'normal'
elif self.mode == 'in doc':
self.text += ' ' + line
if '"""' in line:
self.mode = 'normal'
self.docstring(self.text.strip().strip('"'))
self.text = ''
elif line.startswith('## '):
self.header(line.strip().strip('#'))
elif line.startswith('def ') or line.startswith('class '):
self.text += line.strip().strip(':')
if line.strip().endswith(':'):
if self.definition(self.text):
self.text = ''
self.mode = 'in func'
else:
self.text = ''
self.mode = 'normal'
else:
self.mode = 'in def'
def clean(self, text):
text = text.strip()
text = text.replace('*', r'\*')
return text
def definition(self, text):
text = web.lstrips(text, 'def ')
if text.startswith('_') or text.startswith('class _'):
return False
print '`'+text.strip()+'`'
return True
def docstring(self, text):
print ' :', text.strip()
print
def header(self, text):
print '##', text.strip()
print
for pyfile in os.listdir('trunk/web'):
if pyfile[-2:] == 'py':
print
print '## ' + pyfile
print
Parser().go('trunk/web/' + pyfile)
print '`ctx`\n :',
print '\n'.join(' '+x for x in web.ctx.__doc__.strip().split('\n'))
webpy/tools/markdown.py 0000755 0001750 0001750 00000054705 13146625266 013725 0 ustar wmb wmb #!/usr/bin/python
import re, md5, sys, string
"""markdown.py: A Markdown-styled-text to HTML converter in Python.
Usage:
./markdown.py textfile.markdown
Calling:
import markdown
somehtml = markdown.markdown(sometext)
For other versions of markdown, see:
http://www.freewisdom.org/projects/python-markdown/
http://en.wikipedia.org/wiki/Markdown
"""
__version__ = '1.0.1-2' # port of 1.0.1
__license__ = "GNU GPL 2"
__author__ = [
'John Gruber ',
'Tollef Fog Heen ',
'Aaron Swartz '
]
def htmlquote(text):
"""Encodes `text` for raw use in HTML."""
text = text.replace("&", "&") # Must be done first!
text = text.replace("<", "<")
text = text.replace(">", ">")
text = text.replace("'", "'")
text = text.replace('"', """)
return text
def semirandom(seed):
x = 0
for c in md5.new(seed).digest(): x += ord(c)
return x / (255*16.)
class _Markdown:
emptyelt = " />"
tabwidth = 4
escapechars = '\\`*_{}[]()>#+-.!'
escapetable = {}
for char in escapechars:
escapetable[char] = md5.new(char).hexdigest()
r_multiline = re.compile("\n{2,}")
r_stripspace = re.compile(r"^[ \t]+$", re.MULTILINE)
def parse(self, text):
self.urls = {}
self.titles = {}
self.html_blocks = {}
self.list_level = 0
text = text.replace("\r\n", "\n")
text = text.replace("\r", "\n")
text += "\n\n"
text = self._Detab(text)
text = self.r_stripspace.sub("", text)
text = self._HashHTMLBlocks(text)
text = self._StripLinkDefinitions(text)
text = self._RunBlockGamut(text)
text = self._UnescapeSpecialChars(text)
return text
r_StripLinkDefinitions = re.compile(r"""
^[ ]{0,%d}\[(.+)\]: # id = $1
[ \t]*\n?[ \t]*
(\S+?)>? # url = $2
[ \t]*\n?[ \t]*
(?:
(?<=\s) # lookbehind for whitespace
[\"\(] # " is backlashed so it colorizes our code right
(.+?) # title = $3
[\"\)]
[ \t]*
)? # title is optional
(?:\n+|\Z)
""" % (tabwidth-1), re.MULTILINE|re.VERBOSE)
def _StripLinkDefinitions(self, text):
def replacefunc(matchobj):
(t1, t2, t3) = matchobj.groups()
#@@ case sensitivity?
self.urls[t1.lower()] = self._EncodeAmpsAndAngles(t2)
if t3 is not None:
self.titles[t1.lower()] = t3.replace('"', '"')
return ""
text = self.r_StripLinkDefinitions.sub(replacefunc, text)
return text
blocktagsb = r"p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|math"
blocktagsa = blocktagsb + "|ins|del"
r_HashHTMLBlocks1 = re.compile(r"""
( # save in $1
^ # start of line (with /m)
<(%s) # start tag = $2
\b # word break
(.*\n)*? # any number of lines, minimally matching
\2> # the matching end tag
[ \t]* # trailing spaces/tabs
(?=\n+|$) # followed by a newline or end of document
)
""" % blocktagsa, re.MULTILINE | re.VERBOSE)
r_HashHTMLBlocks2 = re.compile(r"""
( # save in $1
^ # start of line (with /m)
<(%s) # start tag = $2
\b # word break
(.*\n)*? # any number of lines, minimally matching
.*\2> # the matching end tag
[ \t]* # trailing spaces/tabs
(?=\n+|\Z) # followed by a newline or end of document
)
""" % blocktagsb, re.MULTILINE | re.VERBOSE)
r_HashHR = re.compile(r"""
(?:
(?<=\n\n) # Starting after a blank line
| # or
\A\n? # the beginning of the doc
)
( # save in $1
[ ]{0,%d}
<(hr) # start tag = $2
\b # word break
([^<>])*? #
/?> # the matching end tag
[ \t]*
(?=\n{2,}|\Z)# followed by a blank line or end of document
)
""" % (tabwidth-1), re.VERBOSE)
r_HashComment = re.compile(r"""
(?:
(?<=\n\n) # Starting after a blank line
| # or
\A\n? # the beginning of the doc
)
( # save in $1
[ ]{0,%d}
(?:
)
[ \t]*
(?=\n{2,}|\Z)# followed by a blank line or end of document
)
""" % (tabwidth-1), re.VERBOSE)
def _HashHTMLBlocks(self, text):
def handler(m):
key = md5.new(m.group(1)).hexdigest()
self.html_blocks[key] = m.group(1)
return "\n\n%s\n\n" % key
text = self.r_HashHTMLBlocks1.sub(handler, text)
text = self.r_HashHTMLBlocks2.sub(handler, text)
oldtext = text
text = self.r_HashHR.sub(handler, text)
text = self.r_HashComment.sub(handler, text)
return text
#@@@ wrong!
r_hr1 = re.compile(r'^[ ]{0,2}([ ]?\*[ ]?){3,}[ \t]*$', re.M)
r_hr2 = re.compile(r'^[ ]{0,2}([ ]?-[ ]?){3,}[ \t]*$', re.M)
r_hr3 = re.compile(r'^[ ]{0,2}([ ]?_[ ]?){3,}[ \t]*$', re.M)
def _RunBlockGamut(self, text):
text = self._DoHeaders(text)
for x in [self.r_hr1, self.r_hr2, self.r_hr3]:
text = x.sub("\n s.
text = self._HashHTMLBlocks(text)
text = self._FormParagraphs(text)
return text
r_NewLine = re.compile(" {2,}\n")
def _RunSpanGamut(self, text):
text = self._DoCodeSpans(text)
text = self._EscapeSpecialChars(text)
text = self._DoImages(text)
text = self._DoAnchors(text)
text = self._DoAutoLinks(text)
text = self._EncodeAmpsAndAngles(text)
text = self._DoItalicsAndBold(text)
text = self.r_NewLine.sub(" ? # href = $3
[ \t]*
( # $4
([\'\"]) # quote char = $5
(.*?) # Title = $6
\5 # matching quote
)? # title is optional
\)
)
""", re.S|re.VERBOSE)
def _DoAnchors(self, text):
# We here don't do the same as the perl version, as python's regex
# engine gives us no way to match brackets.
def handler1(m):
whole_match = m.group(1)
link_text = m.group(2)
link_id = m.group(3).lower()
if not link_id: link_id = link_text.lower()
title = self.titles.get(link_id, None)
if self.urls.has_key(link_id):
url = self.urls[link_id]
url = url.replace("*", self.escapetable["*"])
url = url.replace("_", self.escapetable["_"])
res = '%s " % htmlquote(link_text)
else:
res = whole_match
return res
def handler2(m):
whole_match = m.group(1)
link_text = m.group(2)
url = m.group(3)
title = m.group(6)
url = url.replace("*", self.escapetable["*"])
url = url.replace("_", self.escapetable["_"])
res = '''%s " % htmlquote(link_text)
return res
text = self.r_DoAnchors1.sub(handler1, text)
text = self.r_DoAnchors2.sub(handler2, text)
return text
r_DoImages1 = re.compile(
r""" ( # wrap whole match in $1
!\[
(.*?) # alt text = $2
\]
[ ]? # one optional space
(?:\n[ ]*)? # one optional newline followed by spaces
\[
(.*?) # id = $3
\]
)
""", re.VERBOSE|re.S)
r_DoImages2 = re.compile(
r""" ( # wrap whole match in $1
!\[
(.*?) # alt text = $2
\]
\( # literal paren
[ \t]*
(\S+?)>? # src url = $3
[ \t]*
( # $4
([\'\"]) # quote char = $5
(.*?) # title = $6
\5 # matching quote
[ \t]*
)? # title is optional
\)
)
""", re.VERBOSE|re.S)
def _DoImages(self, text):
def handler1(m):
whole_match = m.group(1)
alt_text = m.group(2)
link_id = m.group(3).lower()
if not link_id:
link_id = alt_text.lower()
alt_text = alt_text.replace('"', """)
if self.urls.has_key(link_id):
url = self.urls[link_id]
url = url.replace("*", self.escapetable["*"])
url = url.replace("_", self.escapetable["_"])
res = ''' = len(textl): continue
count = textl[i].strip().count(c)
if count > 0 and count == len(textl[i].strip()) and textl[i+1].strip() == '' and textl[i-1].strip() != '':
textl = textl[:i] + textl[i+1:]
textl[i-1] = ''+self._RunSpanGamut(textl[i-1])+' '
textl = textl[:i] + textl[i+1:]
text = '\n'.join(textl)
return text
def handler(m):
level = len(m.group(1))
header = self._RunSpanGamut(m.group(2))
return "%s \n\n" % (level, header, level)
text = findheader(text, '=', '1')
text = findheader(text, '-', '2')
text = self.r_DoHeaders.sub(handler, text)
return text
rt_l = r"""
(
(
[ ]{0,%d}
([*+-]|\d+[.])
[ \t]+
)
(?:.+?)
(
\Z
|
\n{2,}
(?=\S)
(?![ \t]* ([*+-]|\d+[.])[ \t]+)
)
)
""" % (tabwidth - 1)
r_DoLists = re.compile('^'+rt_l, re.M | re.VERBOSE | re.S)
r_DoListsTop = re.compile(
r'(?:\A\n?|(?<=\n\n))'+rt_l, re.M | re.VERBOSE | re.S)
def _DoLists(self, text):
def handler(m):
list_type = "ol"
if m.group(3) in [ "*", "-", "+" ]:
list_type = "ul"
listn = m.group(1)
listn = self.r_multiline.sub("\n\n\n", listn)
res = self._ProcessListItems(listn)
res = "<%s>\n%s%s>\n" % (list_type, res, list_type)
return res
if self.list_level:
text = self.r_DoLists.sub(handler, text)
else:
text = self.r_DoListsTop.sub(handler, text)
return text
r_multiend = re.compile(r"\n{2,}\Z")
r_ProcessListItems = re.compile(r"""
(\n)? # leading line = $1
(^[ \t]*) # leading whitespace = $2
([*+-]|\d+[.]) [ \t]+ # list marker = $3
((?:.+?) # list item text = $4
(\n{1,2}))
(?= \n* (\Z | \2 ([*+-]|\d+[.]) [ \t]+))
""", re.VERBOSE | re.M | re.S)
def _ProcessListItems(self, text):
self.list_level += 1
text = self.r_multiend.sub("\n", text)
def handler(m):
item = m.group(4)
leading_line = m.group(1)
leading_space = m.group(2)
if leading_line or self.r_multiline.search(item):
item = self._RunBlockGamut(self._Outdent(item))
else:
item = self._DoLists(self._Outdent(item))
if item[-1] == "\n": item = item[:-1] # chomp
item = self._RunSpanGamut(item)
return "%s \n" % item
text = self.r_ProcessListItems.sub(handler, text)
self.list_level -= 1
return text
r_DoCodeBlocks = re.compile(r"""
(?:\n\n|\A)
( # $1 = the code block
(?:
(?:[ ]{%d} | \t) # Lines must start with a tab or equiv
.*\n+
)+
)
((?=^[ ]{0,%d}\S)|\Z) # Lookahead for non-space/end of doc
""" % (tabwidth, tabwidth), re.M | re.VERBOSE)
def _DoCodeBlocks(self, text):
def handler(m):
codeblock = m.group(1)
codeblock = self._EncodeCode(self._Outdent(codeblock))
codeblock = self._Detab(codeblock)
codeblock = codeblock.lstrip("\n")
codeblock = codeblock.rstrip()
res = "\n\n%s\n
\n\n" % codeblock
return res
text = self.r_DoCodeBlocks.sub(handler, text)
return text
r_DoCodeSpans = re.compile(r"""
(`+) # $1 = Opening run of `
(.+?) # $2 = The code block
(?%s" % c
text = self.r_DoCodeSpans.sub(handler, text)
return text
def _EncodeCode(self, text):
text = text.replace("&","&")
text = text.replace("<","<")
text = text.replace(">",">")
for c in "*_{}[]\\":
text = text.replace(c, self.escapetable[c])
return text
r_DoBold = re.compile(r"(\*\*|__) (?=\S) (.+?[*_]*) (?<=\S) \1", re.VERBOSE | re.S)
r_DoItalics = re.compile(r"(\*|_) (?=\S) (.+?) (?<=\S) \1", re.VERBOSE | re.S)
def _DoItalicsAndBold(self, text):
text = self.r_DoBold.sub(r"\2 ", text)
text = self.r_DoItalics.sub(r"\2 ", text)
return text
r_start = re.compile(r"^", re.M)
r_DoBlockQuotes1 = re.compile(r"^[ \t]*>[ \t]?", re.M)
r_DoBlockQuotes2 = re.compile(r"^[ \t]+$", re.M)
r_DoBlockQuotes3 = re.compile(r"""
( # Wrap whole match in $1
(
^[ \t]*>[ \t]? # '>' at the start of a line
.+\n # rest of the first line
(.+\n)* # subsequent consecutive lines
\n* # blanks
)+
)""", re.M | re.VERBOSE)
r_protectpre = re.compile(r'(\s*.+? )', re.S)
r_propre = re.compile(r'^ ', re.M)
def _DoBlockQuotes(self, text):
def prehandler(m):
return self.r_propre.sub('', m.group(1))
def handler(m):
bq = m.group(1)
bq = self.r_DoBlockQuotes1.sub("", bq)
bq = self.r_DoBlockQuotes2.sub("", bq)
bq = self._RunBlockGamut(bq)
bq = self.r_start.sub(" ", bq)
bq = self.r_protectpre.sub(prehandler, bq)
return "\n%s\n \n\n" % bq
text = self.r_DoBlockQuotes3.sub(handler, text)
return text
r_tabbed = re.compile(r"^([ \t]*)")
def _FormParagraphs(self, text):
text = text.strip("\n")
grafs = self.r_multiline.split(text)
for g in xrange(len(grafs)):
t = grafs[g].strip() #@@?
if not self.html_blocks.has_key(t):
t = self._RunSpanGamut(t)
t = self.r_tabbed.sub(r"", t)
t += "
"
grafs[g] = t
for g in xrange(len(grafs)):
t = grafs[g].strip()
if self.html_blocks.has_key(t):
grafs[g] = self.html_blocks[t]
return "\n\n".join(grafs)
r_EncodeAmps = re.compile(r"&(?!#?[xX]?(?:[0-9a-fA-F]+|\w+);)")
r_EncodeAngles = re.compile(r"<(?![a-z/?\$!])")
def _EncodeAmpsAndAngles(self, text):
text = self.r_EncodeAmps.sub("&", text)
text = self.r_EncodeAngles.sub("<", text)
return text
def _EncodeBackslashEscapes(self, text):
for char in self.escapechars:
text = text.replace("\\" + char, self.escapetable[char])
return text
r_link = re.compile(r"<((https?|ftp):[^\'\">\s]+)>", re.I)
r_email = re.compile(r"""
<
(?:mailto:)?
(
[-.\w]+
\@
[-a-z0-9]+(\.[-a-z0-9]+)*\.[a-z]+
)
>""", re.VERBOSE|re.I)
def _DoAutoLinks(self, text):
text = self.r_link.sub(r'\1 ', text)
def handler(m):
l = m.group(1)
return self._EncodeEmailAddress(self._UnescapeSpecialChars(l))
text = self.r_email.sub(handler, text)
return text
r_EncodeEmailAddress = re.compile(r">.+?:")
def _EncodeEmailAddress(self, text):
encode = [
lambda x: "%s;" % ord(x),
lambda x: "%X;" % ord(x),
lambda x: x
]
text = "mailto:" + text
addr = ""
for c in text:
if c == ':': addr += c; continue
r = semirandom(addr)
if r < 0.45:
addr += encode[1](c)
elif r > 0.9 and c != '@':
addr += encode[2](c)
else:
addr += encode[0](c)
text = '%s ' % (addr, addr)
text = self.r_EncodeEmailAddress.sub('>', text)
return text
def _UnescapeSpecialChars(self, text):
for key in self.escapetable.keys():
text = text.replace(self.escapetable[key], key)
return text
tokenize_depth = 6
tokenize_nested_tags = '|'.join([r'(?:<[a-z/!$](?:[^<>]'] * tokenize_depth) + (')*>)' * tokenize_depth)
r_TokenizeHTML = re.compile(
r"""(?: ) | # comment
(?: <\? .*? \?> ) | # processing instruction
%s # nested tags
""" % tokenize_nested_tags, re.I|re.VERBOSE)
def _TokenizeHTML(self, text):
pos = 0
tokens = []
matchobj = self.r_TokenizeHTML.search(text, pos)
while matchobj:
whole_tag = matchobj.string[matchobj.start():matchobj.end()]
sec_start = matchobj.end()
tag_start = sec_start - len(whole_tag)
if pos < tag_start:
tokens.append(["text", matchobj.string[pos:tag_start]])
tokens.append(["tag", whole_tag])
pos = sec_start
matchobj = self.r_TokenizeHTML.search(text, pos)
if pos < len(text):
tokens.append(["text", text[pos:]])
return tokens
r_Outdent = re.compile(r"""^(\t|[ ]{1,%d})""" % tabwidth, re.M)
def _Outdent(self, text):
text = self.r_Outdent.sub("", text)
return text
def _Detab(self, text): return text.expandtabs(self.tabwidth)
def Markdown(*args, **kw): return _Markdown().parse(*args, **kw)
markdown = Markdown
if __name__ == '__main__':
if len(sys.argv) > 1:
print Markdown(open(sys.argv[1]).read())
else:
print Markdown(sys.stdin.read())
webpy/tools/makedoc.py 0000644 0001750 0001750 00000010515 13146625266 013472 0 ustar wmb wmb """
Outputs web.py docs as html
version 2.0: documents all code, and indents nicely.
By Colin Rothwell (TheBoff)
"""
import sys
import inspect
import markdown
sys.path.insert(0, '..')
from web.net import websafe
ALL_MODULES = [
'web.application',
'web.contrib.template',
'web.db',
'web.debugerror',
'web.form',
'web.http',
'web.httpserver',
'web.net',
'web.session',
'web.template',
'web.utils',
'web.webapi',
'web.webopenid',
'web.wsgi'
]
item_start = ''
item_end = '
'
indent_amount = 30
doc_these = ( #These are the types of object that should be docced
'module',
'classobj',
'instancemethod',
'function',
'type',
'property',
)
not_these_names = ( #Any particular object names that shouldn't be doced
'fget',
'fset',
'fdel',
'storage', #These stop the lower case versions getting docced
'memoize',
'iterbetter',
'capturesstdout',
'profile',
'threadeddict',
'd', #Don't know what this is, but can only conclude it shouldn't be doc'd
)
css = '''
'''
indent_start = ''
indent_end = '
'
header = '''
'''
def type_string(ob):
return str(type(ob)).split("'")[1]
def ts_css(text):
"""applies nice css to the type string"""
return '%s ' % text
def arg_string(func):
"""Returns a nice argstring for a function or method"""
return inspect.formatargspec(*inspect.getargspec(func))
def recurse_over(ob, name, indent_level=0):
ts = type_string(ob)
if ts not in doc_these: return #stops what shouldn't be docced getting docced
if indent_level > 0 and ts == 'module': return #Stops it getting into the stdlib
if name in not_these_names: return #Stops things we don't want getting docced
indent = indent_level * indent_amount #Indents nicely
ds_indent = indent + (indent_amount / 2)
if indent_level > 0: print indent_start % indent
argstr = ''
if ts.endswith(('function', 'method')):
argstr = arg_string(ob)
elif ts == 'classobj' or ts == 'type':
if ts == 'classobj': ts = 'class'
if hasattr(ob, '__init__'):
if type_string(ob.__init__) == 'instancemethod':
argstr = arg_string(ob.__init__)
else:
argstr = '(self)'
if ts == 'instancemethod': ts = 'method' #looks much nicer
ds = inspect.getdoc(ob)
if ds is None: ds = ''
ds = markdown.Markdown(ds)
mlink = '' % name if ts == 'module' else ''
mend = ' ' if ts == 'module' else ''
print ''.join(('', ts_css(ts), item_start % ts, ' ', mlink, name,
websafe(argstr), mend, item_end, ' '))
print ''.join((indent_start % ds_indent, ds, indent_end, '
'))
#Although ''.join looks weird, it's a lot faster than string addition
members = ''
if hasattr(ob, '__all__'): members = ob.__all__
else: members = [item for item in dir(ob) if not item.startswith('_')]
if not 'im_class' in members:
for name in members:
recurse_over(getattr(ob, name), name, indent_level + 1)
if indent_level > 0: print indent_end
def main(modules=None):
modules = modules or ALL_MODULES
print '' #Stops markdown vandalising my html.
print css
print header
print '
'
for name in modules:
print '%(name)s ' % dict(name=name)
print ' '
for name in modules:
try:
mod = __import__(name, {}, {}, 'x')
recurse_over(mod, name)
except ImportError as e:
print >> sys.stderr, "Unable to import module %s (Error: %s)" % (name, e)
pass
print '
'
if __name__ == '__main__':
main(sys.argv[1:])
webpy/LICENSE.txt 0000644 0001750 0001750 00000000402 13146625266 012172 0 ustar wmb wmb web.py is in the public domain; it can be used for whatever purpose with absolutely no restrictions.
CherryPy WSGI server that is included in the web.py as web.wsgiserver is licensed under CherryPy License. See web/wsgiserver/LICENSE.txt for more details.
webpy/.travis.yml 0000644 0001750 0001750 00000001275 13146625266 012471 0 ustar wmb wmb language: python
python:
- "2.6"
- "2.7"
- "3.5"
- "3.6"
- "3.7-dev"
install:
- pip install -r test/requirements.txt
- if [[ $TRAVIS_PYTHON_VERSION = 2* ]] ; then pip install -r test/requirements2.txt; fi
script: python test/alltests.py
before_install:
- "sudo mkdir -p /usr/include/postgresql/8.4/server"
# - 'sudo apt-get -o Dpkg::Options::="--force-overwrite" install python-profiler'
before_script:
- "psql -c 'create database webpy;' -U postgres"
- mysql --user=root -e "create user 'scott'@'localhost' identified by 'tiger'; create database webpy; grant all privileges on webpy.* to 'scott'@'localhost' with grant option;"
notifications:
irc: "irc.freenode.org#webpy"
webpy/web/ 0000755 0001750 0001750 00000000000 13146625266 011130 5 ustar wmb wmb webpy/web/form.py 0000644 0001750 0001750 00000033237 13146625266 012455 0 ustar wmb wmb """
HTML forms
(part of web.py)
"""
import copy, re
from . import utils, net, webapi as web
def attrget(obj, attr, value=None):
try:
if hasattr(obj, 'has_key') and obj.has_key(attr):
return obj[attr]
except TypeError:
# Handle the case where has_key takes different number of arguments.
# This is the case with Model objects on appengine. See #134
pass
if hasattr(obj, 'keys') and attr in obj.keys(): #needed for Py3, has_key doesn't exist anymore
return obj[attr]
elif hasattr(obj, attr):
return getattr(obj, attr)
return value
class Form(object):
r"""
HTML form.
>>> f = Form(Textbox("x"))
>>> f.render()
u''
>>> f.fill(x="42")
True
>>> f.render()
u''
"""
def __init__(self, *inputs, **kw):
self.inputs = inputs
self.valid = True
self.note = None
self.validators = kw.pop('validators', [])
def __call__(self, x=None):
o = copy.deepcopy(self)
if x: o.validates(x)
return o
def render(self):
out = ''
out += self.rendernote(self.note)
out += '<table>\n'
for i in self.inputs:
html = utils.safeunicode(i.pre) + i.render() + self.rendernote(i.note) + utils.safeunicode(i.post)
if i.is_hidden():
out += '    <tr style="display: none;"><th></th><td>%s</td></tr>\n' % (html)
else:
out += '    <tr><th><label for="%s">%s</label></th><td>%s</td></tr>\n' % (i.id, net.websafe(i.description), html)
out += "
"
return out
def render_css(self):
out = []
out.append(self.rendernote(self.note))
for i in self.inputs:
if not i.is_hidden():
out.append('<label for="%s">%s</label>' % (i.id, net.websafe(i.description)))
out.append(i.pre)
out.append(i.render())
out.append(self.rendernote(i.note))
out.append(i.post)
out.append('\n')
return ''.join(out)
def rendernote(self, note):
if note: return '<strong class="wrong">%s</strong>' % net.websafe(note)
else: return ""
def validates(self, source=None, _validate=True, **kw):
source = source or kw or web.input()
out = True
for i in self.inputs:
v = attrget(source, i.name)
if _validate:
out = i.validate(v) and out
else:
i.set_value(v)
if _validate:
out = out and self._validate(source)
self.valid = out
return out
def _validate(self, value):
self.value = value
for v in self.validators:
if not v.valid(value):
self.note = v.msg
return False
return True
def fill(self, source=None, **kw):
return self.validates(source, _validate=False, **kw)
def __getitem__(self, i):
for x in self.inputs:
if x.name == i: return x
raise KeyError(i)
def __getattr__(self, name):
# don't interfere with deepcopy
inputs = self.__dict__.get('inputs') or []
for x in inputs:
if x.name == name: return x
raise AttributeError(name)
def get(self, i, default=None):
try:
return self[i]
except KeyError:
return default
def _get_d(self): #@@ should really be form.attr, no?
return utils.storage([(i.name, i.get_value()) for i in self.inputs])
d = property(_get_d)
class Input(object):
def __init__(self, name, *validators, **attrs):
self.name = name
self.validators = validators
self.attrs = attrs = AttributeList(attrs)
self.description = attrs.pop('description', name)
self.value = attrs.pop('value', None)
self.pre = attrs.pop('pre', "")
self.post = attrs.pop('post', "")
self.note = None
self.id = attrs.setdefault('id', self.get_default_id())
if 'class_' in attrs:
attrs['class'] = attrs['class_']
del attrs['class_']
def is_hidden(self):
return False
def get_type(self):
raise NotImplementedError()
def get_default_id(self):
return self.name
def validate(self, value):
self.set_value(value)
for v in self.validators:
if not v.valid(value):
self.note = v.msg
return False
return True
def set_value(self, value):
self.value = value
def get_value(self):
return self.value
def render(self):
attrs = self.attrs.copy()
attrs['type'] = self.get_type()
if self.value is not None:
attrs['value'] = self.value
attrs['name'] = self.name
return '<input %s/>' % attrs
def rendernote(self, note):
if note: return '<strong class="wrong">%s</strong>' % net.websafe(note)
else: return ""
def addatts(self):
# add leading space for backward-compatibility
return " " + str(self.attrs)
class AttributeList(dict):
"""List of atributes of input.
>>> a = AttributeList(type='text', name='x', value=20)
>>> a
"""
def copy(self):
return AttributeList(self)
def __str__(self):
return " ".join(['%s="%s"' % (k, net.websafe(v)) for k, v in sorted(self.items())])
def __repr__(self):
return '' % repr(str(self))
class Textbox(Input):
"""Textbox input.
>>> Textbox(name='foo', value='bar').render()
u' '
>>> Textbox(name='foo', value=0).render()
u' '
"""
def get_type(self):
return 'text'
class Password(Input):
"""Password input.
>>> Password(name='password', value='secret').render()
u' '
"""
def get_type(self):
return 'password'
class Textarea(Input):
"""Textarea input.
>>> Textarea(name='foo', value='bar').render()
u'bar '
"""
def render(self):
attrs = self.attrs.copy()
attrs['name'] = self.name
value = net.websafe(self.value or '')
return '<textarea %s>%s</textarea>' % (attrs, value)
class Dropdown(Input):
r"""Dropdown/select input.
>>> Dropdown(name='foo', args=['a', 'b', 'c'], value='b').render()
u'\n a \n b \n c \n \n'
>>> Dropdown(name='foo', args=[('a', 'aa'), ('b', 'bb'), ('c', 'cc')], value='b').render()
u'\n aa \n bb \n cc \n \n'
"""
def __init__(self, name, args, *validators, **attrs):
self.args = args
super(Dropdown, self).__init__(name, *validators, **attrs)
def render(self):
attrs = self.attrs.copy()
attrs['name'] = self.name
x = '<select %s>\n' % attrs
for arg in self.args:
x += self._render_option(arg)
x += '</select>\n'
return x
def _render_option(self, arg, indent=' '):
if isinstance(arg, (tuple, list)):
value, desc= arg
else:
value, desc = arg, arg
value = utils.safestr(value)
if isinstance(self.value, (tuple, list)):
s_value = [utils.safestr(x) for x in self.value]
else:
s_value = utils.safestr(self.value)
if s_value == value or (isinstance(s_value, list) and value in s_value):
select_p = ' selected="selected"'
else:
select_p = ''
return indent + '<option%s value="%s">%s</option>\n' % (select_p, net.websafe(value), net.websafe(desc))
class GroupedDropdown(Dropdown):
r"""Grouped Dropdown/select input.
>>> GroupedDropdown(name='car_type', args=(('Swedish Cars', ('Volvo', 'Saab')), ('German Cars', ('Mercedes', 'Audi'))), value='Audi').render()
u'\n \n Volvo \n Saab \n \n \n Mercedes \n Audi \n \n \n'
>>> GroupedDropdown(name='car_type', args=(('Swedish Cars', (('v', 'Volvo'), ('s', 'Saab'))), ('German Cars', (('m', 'Mercedes'), ('a', 'Audi')))), value='a').render()
u'\n \n Volvo \n Saab \n \n \n Mercedes \n Audi \n \n \n'
"""
def __init__(self, name, args, *validators, **attrs):
self.args = args
super(Dropdown, self).__init__(name, *validators, **attrs)
def render(self):
attrs = self.attrs.copy()
attrs['name'] = self.name
x = '<select %s>\n' % attrs
for label, options in self.args:
x += '  <optgroup label="%s">\n' % net.websafe(label)
for arg in options:
x += self._render_option(arg, indent = ' ')
x += '  </optgroup>\n'
x += '</select>\n'
return x
class Radio(Input):
def __init__(self, name, args, *validators, **attrs):
self.args = args
super(Radio, self).__init__(name, *validators, **attrs)
def render(self):
x = '<span>'
for arg in self.args:
if isinstance(arg, (tuple, list)):
value, desc= arg
else:
value, desc = arg, arg
attrs = self.attrs.copy()
attrs['name'] = self.name
attrs['type'] = 'radio'
attrs['value'] = value
if self.value == value:
attrs['checked'] = 'checked'
x += '<input %s/> %s' % (attrs, net.websafe(desc))
x += '</span>'
return x
class Checkbox(Input):
"""Checkbox input.
>>> Checkbox('foo', value='bar', checked=True).render()
u' '
>>> Checkbox('foo', value='bar').render()
u' '
>>> c = Checkbox('foo', value='bar')
>>> c.validate('on')
True
>>> c.render()
u' '
"""
def __init__(self, name, *validators, **attrs):
self.checked = attrs.pop('checked', False)
Input.__init__(self, name, *validators, **attrs)
def get_default_id(self):
value = utils.safestr(self.value or "")
return self.name + '_' + value.replace(' ', '_')
def render(self):
attrs = self.attrs.copy()
attrs['type'] = 'checkbox'
attrs['name'] = self.name
attrs['value'] = self.value
if self.checked:
attrs['checked'] = 'checked'
return '<input %s/>' % attrs
def set_value(self, value):
self.checked = bool(value)
def get_value(self):
return self.checked
class Button(Input):
"""HTML Button.
>>> Button("save").render()
u'save '
>>> Button("action", value="save", html="Save Changes ").render()
u'Save Changes '
"""
def __init__(self, name, *validators, **attrs):
super(Button, self).__init__(name, *validators, **attrs)
self.description = ""
def render(self):
attrs = self.attrs.copy()
attrs['name'] = self.name
if self.value is not None:
attrs['value'] = self.value
html = attrs.pop('html', None) or net.websafe(self.name)
return '<button %s>%s</button>' % (attrs, html)
class Hidden(Input):
"""Hidden Input.
>>> Hidden(name='foo', value='bar').render()
u' '
"""
def is_hidden(self):
return True
def get_type(self):
return 'hidden'
class File(Input):
"""File input.
>>> File(name='f').render()
u' '
"""
def get_type(self):
return 'file'
class Validator:
def __deepcopy__(self, memo): return copy.copy(self)
def __init__(self, msg, test, jstest=None): utils.autoassign(self, locals())
def valid(self, value):
try: return self.test(value)
except: return False
notnull = Validator("Required", bool)
class regexp(Validator):
def __init__(self, rexp, msg):
self.rexp = re.compile(rexp)
self.msg = msg
def valid(self, value):
return bool(self.rexp.match(value))
if __name__ == "__main__":
import doctest
doctest.testmod()
webpy/web/wsgi.py 0000644 0001750 0001750 00000004450 13146625266 012456 0 ustar wmb wmb """
WSGI Utilities
(from web.py)
"""
import os, sys
from . import http
from . import webapi as web
from .utils import listget, intget
from .net import validaddr, validip
from . import httpserver
def runfcgi(func, addr=('localhost', 8000)):
"""Runs a WSGI function as a FastCGI server."""
import flup.server.fcgi as flups
return flups.WSGIServer(func, multiplexed=True, bindAddress=addr, debug=False).run()
def runscgi(func, addr=('localhost', 4000)):
"""Runs a WSGI function as an SCGI server."""
import flup.server.scgi as flups
return flups.WSGIServer(func, bindAddress=addr, debug=False).run()
def runwsgi(func):
"""
Runs a WSGI-compatible `func` using FCGI, SCGI, or a simple web server,
as appropriate based on context and `sys.argv`.
"""
if 'SERVER_SOFTWARE' in os.environ: # cgi
os.environ['FCGI_FORCE_CGI'] = 'Y'
if ('PHP_FCGI_CHILDREN' in os.environ #lighttpd fastcgi
or 'SERVER_SOFTWARE' in os.environ):
return runfcgi(func, None)
if 'fcgi' in sys.argv or 'fastcgi' in sys.argv:
args = sys.argv[1:]
if 'fastcgi' in args: args.remove('fastcgi')
elif 'fcgi' in args: args.remove('fcgi')
if args:
return runfcgi(func, validaddr(args[0]))
else:
return runfcgi(func, None)
if 'scgi' in sys.argv:
args = sys.argv[1:]
args.remove('scgi')
if args:
return runscgi(func, validaddr(args[0]))
else:
return runscgi(func)
server_addr = validip(listget(sys.argv, 1, ''))
if 'PORT' in os.environ: # e.g. Heroku
server_addr = ('0.0.0.0', intget(os.environ['PORT']))
return httpserver.runsimple(func, server_addr)
def _is_dev_mode():
# Some embedded python interpreters won't have sys.argv
# For details, see https://github.com/webpy/webpy/issues/87
argv = getattr(sys, "argv", [])
# quick hack to check if the program is running in dev mode.
if 'SERVER_SOFTWARE' in os.environ \
or 'PHP_FCGI_CHILDREN' in os.environ \
or 'fcgi' in argv or 'fastcgi' in argv \
or 'mod_wsgi' in argv:
return False
return True
# When running the builtin-server, enable debug mode if not already set.
web.config.setdefault('debug', _is_dev_mode())
webpy/web/webapi.py 0000644 0001750 0001750 00000042406 13146625266 012757 0 ustar wmb wmb """
Web API (wrapper around WSGI)
(from web.py)
"""
from __future__ import print_function
__all__ = [
"config",
"header", "debug",
"input", "data",
"setcookie", "cookies",
"ctx",
"HTTPError",
# 200, 201, 202, 204
"OK", "Created", "Accepted", "NoContent",
"ok", "created", "accepted", "nocontent",
# 301, 302, 303, 304, 307
"Redirect", "Found", "SeeOther", "NotModified", "TempRedirect",
"redirect", "found", "seeother", "notmodified", "tempredirect",
# 400, 401, 403, 404, 405, 406, 409, 410, 412, 415, 451
"BadRequest", "Unauthorized", "Forbidden", "NotFound", "NoMethod", "NotAcceptable", "Conflict", "Gone", "PreconditionFailed", "UnsupportedMediaType", "UnavailableForLegalReasons",
"badrequest", "unauthorized", "forbidden", "notfound", "nomethod", "notacceptable", "conflict", "gone", "preconditionfailed", "unsupportedmediatype", "unavailableforlegalreasons",
# 500
"InternalError",
"internalerror",
]
import sys, cgi, pprint, urllib
from .utils import storage, storify, threadeddict, dictadd, intget, safestr
from .py3helpers import PY2, urljoin, string_types
try:
from urllib.parse import unquote, quote
from http.cookies import SimpleCookie, Morsel, CookieError
except ImportError:
from urllib import unquote, quote
from Cookie import SimpleCookie, Morsel, CookieError
from io import StringIO, BytesIO
config = storage()
config.__doc__ = """
A configuration object for various aspects of web.py.
`debug`
: when True, enables reloading, disables template caching and sets internalerror to debugerror.
"""
class HTTPError(Exception):
def __init__(self, status, headers={}, data=""):
ctx.status = status
for k, v in headers.items():
header(k, v)
self.data = data
Exception.__init__(self, status)
def _status_code(status, data=None, classname=None, docstring=None):
if data is None:
data = status.split(" ", 1)[1]
classname = status.split(" ", 1)[1].replace(' ', '') # 304 Not Modified -> NotModified
docstring = docstring or '`%s` status' % status
def __init__(self, data=data, headers={}):
HTTPError.__init__(self, status, headers, data)
# trick to create class dynamically with dynamic docstring.
return type(classname, (HTTPError, object), {
'__doc__': docstring,
'__init__': __init__
})
ok = OK = _status_code("200 OK", data="")
created = Created = _status_code("201 Created")
accepted = Accepted = _status_code("202 Accepted")
nocontent = NoContent = _status_code("204 No Content")
class Redirect(HTTPError):
"""A `301 Moved Permanently` redirect."""
def __init__(self, url, status='301 Moved Permanently', absolute=False):
"""
Returns a `status` redirect to the new URL.
`url` is joined with the base URL so that things like
`redirect("about")` will work properly.
"""
newloc = urljoin(ctx.path, url)
if newloc.startswith('/'):
if absolute:
home = ctx.realhome
else:
home = ctx.home
newloc = home + newloc
headers = {
'Content-Type': 'text/html',
'Location': newloc
}
HTTPError.__init__(self, status, headers, "")
redirect = Redirect
class Found(Redirect):
"""A `302 Found` redirect."""
def __init__(self, url, absolute=False):
Redirect.__init__(self, url, '302 Found', absolute=absolute)
found = Found
class SeeOther(Redirect):
"""A `303 See Other` redirect."""
def __init__(self, url, absolute=False):
Redirect.__init__(self, url, '303 See Other', absolute=absolute)
seeother = SeeOther
class NotModified(HTTPError):
"""A `304 Not Modified` status."""
def __init__(self):
HTTPError.__init__(self, "304 Not Modified")
notmodified = NotModified
class TempRedirect(Redirect):
"""A `307 Temporary Redirect` redirect."""
def __init__(self, url, absolute=False):
Redirect.__init__(self, url, '307 Temporary Redirect', absolute=absolute)
tempredirect = TempRedirect
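# Usage sketch (illustrative): handlers normally *raise* one of the redirect
# classes above; the application turns the exception into a 3xx response and
# the Location is resolved against ctx.home. The handler class and target path
# below are hypothetical.
#
#     class login:
#         def POST(self):
#             ...  # authenticate the user
#             raise seeother('/dashboard')   # 303 See Other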
class BadRequest(HTTPError):
"""`400 Bad Request` error."""
message = "bad request"
def __init__(self, message=None):
status = "400 Bad Request"
headers = {'Content-Type': 'text/html'}
HTTPError.__init__(self, status, headers, message or self.message)
badrequest = BadRequest
class Unauthorized(HTTPError):
"""`401 Unauthorized` error."""
message = "unauthorized"
def __init__(self, message=None):
status = "401 Unauthorized"
headers = {'Content-Type': 'text/html'}
HTTPError.__init__(self, status, headers, message or self.message)
unauthorized = Unauthorized
class Forbidden(HTTPError):
"""`403 Forbidden` error."""
message = "forbidden"
def __init__(self, message=None):
status = "403 Forbidden"
headers = {'Content-Type': 'text/html'}
HTTPError.__init__(self, status, headers, message or self.message)
forbidden = Forbidden
class _NotFound(HTTPError):
"""`404 Not Found` error."""
message = "not found"
def __init__(self, message=None):
status = '404 Not Found'
headers = {'Content-Type': 'text/html'}
HTTPError.__init__(self, status, headers, message or self.message)
def NotFound(message=None):
"""Returns HTTPError with '404 Not Found' error from the active application.
"""
if message:
return _NotFound(message)
elif ctx.get('app_stack'):
return ctx.app_stack[-1].notfound()
else:
return _NotFound()
notfound = NotFound
class NoMethod(HTTPError):
"""A `405 Method Not Allowed` error."""
def __init__(self, cls=None):
status = '405 Method Not Allowed'
headers = {}
headers['Content-Type'] = 'text/html'
methods = ['GET', 'HEAD', 'POST', 'PUT', 'DELETE']
if cls:
methods = [method for method in methods if hasattr(cls, method)]
headers['Allow'] = ', '.join(methods)
data = None
HTTPError.__init__(self, status, headers, data)
nomethod = NoMethod
class NotAcceptable(HTTPError):
"""`406 Not Acceptable` error."""
message = "not acceptable"
def __init__(self, message=None):
status = "406 Not Acceptable"
headers = {'Content-Type': 'text/html'}
HTTPError.__init__(self, status, headers, message or self.message)
notacceptable = NotAcceptable
class Conflict(HTTPError):
"""`409 Conflict` error."""
message = "conflict"
def __init__(self, message=None):
status = "409 Conflict"
headers = {'Content-Type': 'text/html'}
HTTPError.__init__(self, status, headers, message or self.message)
conflict = Conflict
class Gone(HTTPError):
"""`410 Gone` error."""
message = "gone"
def __init__(self, message=None):
status = '410 Gone'
headers = {'Content-Type': 'text/html'}
HTTPError.__init__(self, status, headers, message or self.message)
gone = Gone
class PreconditionFailed(HTTPError):
"""`412 Precondition Failed` error."""
message = "precondition failed"
def __init__(self, message=None):
status = "412 Precondition Failed"
headers = {'Content-Type': 'text/html'}
HTTPError.__init__(self, status, headers, message or self.message)
preconditionfailed = PreconditionFailed
class UnsupportedMediaType(HTTPError):
"""`415 Unsupported Media Type` error."""
message = "unsupported media type"
def __init__(self, message=None):
status = "415 Unsupported Media Type"
headers = {'Content-Type': 'text/html'}
HTTPError.__init__(self, status, headers, message or self.message)
unsupportedmediatype = UnsupportedMediaType
class _UnavailableForLegalReasons(HTTPError):
"""`451 Unavailable For Legal Reasons` error."""
message="unavailable for legal reasons"
def __init__(self, message=None):
status = "451 Unavailable For Legal Reasons"
headers = {'Content-Type': 'text/html'}
HTTPError.__init__(self, status, headers, message or self.message)
def UnavailableForLegalReasons(message=None):
"""Returns HTTPError with '451 Unavailable For Legal Reasons' error from the active application.
"""
if message:
return _UnavailableForLegalReasons(message)
elif ctx.get('app_stack'):
return ctx.app_stack[-1].unavailableforlegalreasons()
else:
return _UnavailableForLegalReasons()
unavailableforlegalreasons = UnavailableForLegalReasons
class _InternalError(HTTPError):
"""`500 Internal Server Error`."""
message = "internal server error"
def __init__(self, message=None):
status = '500 Internal Server Error'
headers = {'Content-Type': 'text/html'}
HTTPError.__init__(self, status, headers, message or self.message)
def InternalError(message=None):
"""Returns HTTPError with '500 Internal Server Error' error from the active application.
"""
if message:
return _InternalError(message)
elif ctx.get('app_stack'):
return ctx.app_stack[-1].internalerror()
else:
return _InternalError()
internalerror = InternalError
def header(hdr, value, unique=False):
"""
Adds the header `hdr: value` with the response.
If `unique` is True and a header with that name already exists,
it doesn't add a new one.
"""
hdr, value = safestr(hdr), safestr(value)
# protection against HTTP response splitting attack
if '\n' in hdr or '\r' in hdr or '\n' in value or '\r' in value:
raise ValueError('invalid characters in header')
if unique is True:
for h, v in ctx.headers:
if h.lower() == hdr.lower(): return
ctx.headers.append((hdr, value))
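# Usage sketch (illustrative): headers accumulate in ctx.headers for the
# current request; with unique=True the first value set for a name wins.
#
#     header('Content-Type', 'application/json')
#     header('Cache-Control', 'no-store', unique=True)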
def rawinput(method=None):
"""Returns storage object with GET or POST arguments.
"""
method = method or "both"
def dictify(fs):
# hack to make web.input work with enctype='text/plain'.
if fs.list is None:
fs.list = []
return dict([(k, fs[k]) for k in fs.keys()])
e = ctx.env.copy()
a = b = {}
if method.lower() in ['both', 'post', 'put']:
if e['REQUEST_METHOD'] in ['POST', 'PUT']:
if e.get('CONTENT_TYPE', '').lower().startswith('multipart/'):
# since wsgi.input is directly passed to cgi.FieldStorage,
# it can not be called multiple times. Saving the FieldStorage
# object in ctx to allow calling web.input multiple times.
a = ctx.get('_fieldstorage')
if not a:
fp = e['wsgi.input']
a = cgi.FieldStorage(fp=fp, environ=e, keep_blank_values=1)
ctx._fieldstorage = a
else:
d = data()
if PY2 and isinstance(d, unicode):
d = d.encode('utf-8')
fp = BytesIO(d)
a = cgi.FieldStorage(fp=fp, environ=e, keep_blank_values=1)
a = dictify(a)
if method.lower() in ['both', 'get']:
e['REQUEST_METHOD'] = 'GET'
b = dictify(cgi.FieldStorage(environ=e, keep_blank_values=1))
def process_fieldstorage(fs):
if isinstance(fs, list):
return [process_fieldstorage(x) for x in fs]
elif fs.filename is None:
return fs.value
else:
return fs
return storage([(k, process_fieldstorage(v)) for k, v in dictadd(b, a).items()])
def input(*requireds, **defaults):
"""
Returns a `storage` object with the GET and POST arguments.
See `storify` for how `requireds` and `defaults` work.
"""
_method = defaults.pop('_method', 'both')
out = rawinput(_method)
try:
defaults.setdefault('_unicode', True) # force unicode conversion by default.
return storify(out, *requireds, **defaults)
except KeyError:
raise badrequest()
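# Usage sketch (illustrative): names listed in `requireds` trigger a 400 when
# missing, `defaults` fill in absent keys, and a list default forces list
# coercion (see `storify`). The parameter names below are hypothetical.
#
#     i = input('q', page='1', tags=[])
#     # i.q       -> required, BadRequest is raised if absent
#     # i.page    -> '1' unless the request supplies a value
#     # i.tags    -> always a list, even for a single submitted value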
def data():
"""Returns the data sent with the request."""
if 'data' not in ctx:
cl = intget(ctx.env.get('CONTENT_LENGTH'), 0)
ctx.data = ctx.env['wsgi.input'].read(cl)
return ctx.data
def setcookie(name, value, expires='', domain=None,
secure=False, httponly=False, path=None):
"""Sets a cookie."""
morsel = Morsel()
name, value = safestr(name), safestr(value)
morsel.set(name, value, quote(value))
if isinstance(expires, int) and expires < 0:
expires = -1000000000
morsel['expires'] = expires
morsel['path'] = path or ctx.homepath+'/'
if domain:
morsel['domain'] = domain
if secure:
morsel['secure'] = secure
value = morsel.OutputString()
if httponly:
value += '; httponly'
header('Set-Cookie', value)
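# Usage sketch (illustrative): the cookie path defaults to ctx.homepath + '/',
# and a negative `expires` is the conventional way to delete a cookie here.
# The cookie name and value below are hypothetical.
#
#     setcookie('theme', 'dark', httponly=True)
#     setcookie('theme', '', expires=-1)   # expire/delete the cookie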
def decode_cookie(value):
r"""Safely decodes a cookie value to unicode.
Tries us-ascii, utf-8 and iso8859 encodings, in that order.
>>> decode_cookie('')
u''
>>> decode_cookie('asdf')
u'asdf'
>>> decode_cookie('foo \xC3\xA9 bar')
u'foo \xe9 bar'
>>> decode_cookie('foo \xE9 bar')
u'foo \xe9 bar'
"""
try:
# First try plain ASCII encoding
return unicode(value, 'us-ascii')
except UnicodeError:
# Then try UTF-8, and if that fails, ISO8859
try:
return unicode(value, 'utf-8')
except UnicodeError:
return unicode(value, 'iso8859', 'ignore')
def parse_cookies(http_cookie):
r"""Parse a HTTP_COOKIE header and return dict of cookie names and decoded values.
>>> sorted(parse_cookies('').items())
[]
>>> sorted(parse_cookies('a=1').items())
[('a', '1')]
>>> sorted(parse_cookies('a=1%202').items())
[('a', '1 2')]
>>> sorted(parse_cookies('a=Z%C3%A9Z').items())
[('a', 'Z\xc3\xa9Z')]
>>> sorted(parse_cookies('a=1; b=2; c=3').items())
[('a', '1'), ('b', '2'), ('c', '3')]
>>> sorted(parse_cookies('a=1; b=w("x")|y=z; c=3').items())
[('a', '1'), ('b', 'w('), ('c', '3')]
>>> sorted(parse_cookies('a=1; b=w(%22x%22)|y=z; c=3').items())
[('a', '1'), ('b', 'w("x")|y=z'), ('c', '3')]
>>> sorted(parse_cookies('keebler=E=mc2').items())
[('keebler', 'E=mc2')]
>>> sorted(parse_cookies(r'keebler="E=mc2; L=\"Loves\"; fudge=\012;"').items())
[('keebler', 'E=mc2; L="Loves"; fudge=\n;')]
"""
#print "parse_cookies"
if '"' in http_cookie:
# HTTP_COOKIE has quotes in it, use slow but correct cookie parsing
cookie = SimpleCookie()
try:
cookie.load(http_cookie)
except CookieError:
# If HTTP_COOKIE header is malformed, try at least to load the cookies we can by
# first splitting on ';' and loading each attr=value pair separately
cookie = SimpleCookie()
for attr_value in http_cookie.split(';'):
try:
cookie.load(attr_value)
except CookieError:
pass
cookies = dict([(k, unquote(v.value)) for k, v in cookie.items()])
else:
# HTTP_COOKIE doesn't have quotes, use fast cookie parsing
cookies = {}
for key_value in http_cookie.split(';'):
key_value = key_value.split('=', 1)
if len(key_value) == 2:
key, value = key_value
cookies[key.strip()] = unquote(value.strip())
return cookies
def cookies(*requireds, **defaults):
r"""Returns a `storage` object with all the request cookies in it.
See `storify` for how `requireds` and `defaults` work.
This is forgiving on bad HTTP_COOKIE input; it tries to parse at least
the cookies it can.
The values are converted to unicode if _unicode=True is passed.
"""
# If _unicode=True is specified, use decode_cookie to convert cookie value to unicode
if defaults.get("_unicode") is True:
defaults['_unicode'] = decode_cookie
# parse cookie string and cache the result for next time.
if '_parsed_cookies' not in ctx:
http_cookie = ctx.env.get("HTTP_COOKIE", "")
ctx._parsed_cookies = parse_cookies(http_cookie)
try:
return storify(ctx._parsed_cookies, *requireds, **defaults)
except KeyError:
badrequest()
raise StopIteration()
def debug(*args):
"""
Prints a prettyprinted version of `args` to stderr.
"""
try:
out = ctx.environ['wsgi.errors']
except:
out = sys.stderr
for arg in args:
print(pprint.pformat(arg), file=out)
return ''
def _debugwrite(x):
try:
out = ctx.environ['wsgi.errors']
except:
out = sys.stderr
out.write(x)
debug.write = _debugwrite
ctx = context = threadeddict()
ctx.__doc__ = """
A `storage` object containing various information about the request:
`environ` (aka `env`)
: A dictionary containing the standard WSGI environment variables.
`host`
: The domain (`Host` header) requested by the user.
`home`
: The base path for the application.
`ip`
: The IP address of the requester.
`method`
: The HTTP method used.
`path`
: The path request.
`query`
: If there are no query arguments, the empty string. Otherwise, a `?` followed
by the query string.
`fullpath`
: The full path requested, including query arguments (`== path + query`).
### Response Data
`status` (default: "200 OK")
: The status code to be used in the response.
`headers`
: A list of 2-tuples to be used in the response.
`output`
: A string to be used as the response.
"""
if __name__ == "__main__":
import doctest
doctest.testmod()
webpy/web/utils.py 0000755 0001750 0001750 00000123120 13146625266 012644 0 ustar wmb wmb #!/usr/bin/env python
"""
General Utilities
(part of web.py)
"""
from __future__ import print_function
__all__ = [
"Storage", "storage", "storify",
"Counter", "counter",
"iters",
"rstrips", "lstrips", "strips",
"safeunicode", "safestr",
"timelimit",
"Memoize", "memoize",
"re_compile", "re_subm",
"group", "uniq", "iterview",
"IterBetter", "iterbetter",
"safeiter", "safewrite",
"dictreverse", "dictfind", "dictfindall", "dictincr", "dictadd",
"requeue", "restack",
"listget", "intget", "datestr",
"numify", "denumify", "commify", "dateify",
"nthstr", "cond",
"CaptureStdout", "capturestdout", "Profile", "profile",
"tryall",
"ThreadedDict", "threadeddict",
"autoassign",
"to36",
"safemarkdown",
"sendmail"
]
import re, sys, time, threading, itertools, traceback, os
import subprocess
import datetime
from threading import local as threadlocal
from .py3helpers import PY2, itervalues, iteritems, text_type, string_types, imap, is_iter
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
class Storage(dict):
"""
A Storage object is like a dictionary except `obj.foo` can be used
in addition to `obj['foo']`.
>>> o = storage(a=1)
>>> o.a
1
>>> o['a']
1
>>> o.a = 2
>>> o['a']
2
>>> del o.a
>>> o.a
Traceback (most recent call last):
...
AttributeError: 'a'
"""
def __getattr__(self, key):
try:
return self[key]
except KeyError as k:
raise AttributeError(k)
def __setattr__(self, key, value):
self[key] = value
def __delattr__(self, key):
try:
del self[key]
except KeyError as k:
raise AttributeError(k)
def __repr__(self):
return '<Storage ' + dict.__repr__(self) + '>'
storage = Storage
def storify(mapping, *requireds, **defaults):
"""
Creates a `storage` object from dictionary `mapping`, raising `KeyError` if
`mapping` doesn't have all of the keys in `requireds` and using the default
values for keys found in `defaults`.
For example, `storify({'a':1, 'c':3}, b=2, c=0)` will return the equivalent of
`storage({'a':1, 'b':2, 'c':3})`.
If a `storify` value is a list (e.g. multiple values in a form submission),
`storify` returns the last element of the list, unless the key appears in
`defaults` as a list. Thus:
>>> storify({'a':[1, 2]}).a
2
>>> storify({'a':[1, 2]}, a=[]).a
[1, 2]
>>> storify({'a':1}, a=[]).a
[1]
>>> storify({}, a=[]).a
[]
Similarly, if the value has a `value` attribute, `storify` will return _its_
value, unless the key appears in `defaults` as a dictionary.
>>> storify({'a':storage(value=1)}).a
1
>>> storify({'a':storage(value=1)}, a={}).a
<Storage {'value': 1}>
>>> storify({}, a={}).a
{}
"""
_unicode = defaults.pop('_unicode', False)
# if _unicode is callable object, use it convert a string to unicode.
to_unicode = safeunicode
if _unicode is not False and hasattr(_unicode, "__call__"):
to_unicode = _unicode
def unicodify(s):
if _unicode and isinstance(s, str): return to_unicode(s)
else: return s
def getvalue(x):
if hasattr(x, 'file') and hasattr(x, 'value'):
return x.value
elif hasattr(x, 'value'):
return unicodify(x.value)
else:
return unicodify(x)
stor = Storage()
for key in requireds + tuple(mapping.keys()):
value = mapping[key]
if isinstance(value, list):
if isinstance(defaults.get(key), list):
value = [getvalue(x) for x in value]
else:
value = value[-1]
if not isinstance(defaults.get(key), dict):
value = getvalue(value)
if isinstance(defaults.get(key), list) and not isinstance(value, list):
value = [value]
setattr(stor, key, value)
for (key, value) in iteritems(defaults):
result = value
if hasattr(stor, key):
result = stor[key]
if value == () and not isinstance(result, tuple):
result = (result,)
setattr(stor, key, result)
return stor
class Counter(storage):
"""Keeps count of how many times something is added.
>>> c = counter()
>>> c.add('x')
>>> c.add('x')
>>> c.add('x')
>>> c.add('x')
>>> c.add('x')
>>> c.add('y')
>>> c['y']
1
>>> c['x']
5
>>> c.most()
['x']
"""
def add(self, n):
self.setdefault(n, 0)
self[n] += 1
def most(self):
"""Returns the keys with maximum count."""
m = max(itervalues(self))
return [k for k, v in iteritems(self) if v == m]
def least(self):
"""Returns the keys with minimum count."""
m = min(itervalues(self))
return [k for k, v in iteritems(self) if v == m]
def percent(self, key):
"""Returns what percentage a certain key is of all entries.
>>> c = counter()
>>> c.add('x')
>>> c.add('x')
>>> c.add('x')
>>> c.add('y')
>>> c.percent('x')
0.75
>>> c.percent('y')
0.25
"""
return float(self[key])/sum(self.values())
def sorted_keys(self):
"""Returns keys sorted by value.
>>> c = counter()
>>> c.add('x')
>>> c.add('x')
>>> c.add('y')
>>> c.sorted_keys()
['x', 'y']
"""
return sorted(self.keys(), key=lambda k: self[k], reverse=True)
def sorted_values(self):
"""Returns values sorted by value.
>>> c = counter()
>>> c.add('x')
>>> c.add('x')
>>> c.add('y')
>>> c.sorted_values()
[2, 1]
"""
return [self[k] for k in self.sorted_keys()]
def sorted_items(self):
"""Returns items sorted by value.
>>> c = counter()
>>> c.add('x')
>>> c.add('x')
>>> c.add('y')
>>> c.sorted_items()
[('x', 2), ('y', 1)]
"""
return [(k, self[k]) for k in self.sorted_keys()]
def __repr__(self):
return '<Counter ' + dict.__repr__(self) + '>'
counter = Counter
iters = [list, tuple, set, frozenset]
class _hack(tuple): pass
iters = _hack(iters)
iters.__doc__ = """
A list of iterable items (like lists, but not strings). Includes whichever
of lists, tuples, sets, and Sets are available in this version of Python.
"""
def _strips(direction, text, remove):
if isinstance(remove, iters):
for subr in remove:
text = _strips(direction, text, subr)
return text
if direction == 'l':
if text.startswith(remove):
return text[len(remove):]
elif direction == 'r':
if text.endswith(remove):
return text[:-len(remove)]
else:
raise ValueError("Direction needs to be r or l.")
return text
def rstrips(text, remove):
"""
removes the string `remove` from the right of `text`
>>> rstrips("foobar", "bar")
'foo'
"""
return _strips('r', text, remove)
def lstrips(text, remove):
"""
removes the string `remove` from the left of `text`
>>> lstrips("foobar", "foo")
'bar'
>>> lstrips('http://foo.org/', ['http://', 'https://'])
'foo.org/'
>>> lstrips('FOOBARBAZ', ['FOO', 'BAR'])
'BAZ'
>>> lstrips('FOOBARBAZ', ['BAR', 'FOO'])
'BARBAZ'
"""
return _strips('l', text, remove)
def strips(text, remove):
"""
removes the string `remove` from the both sides of `text`
>>> strips("foobarfoo", "foo")
'bar'
"""
return rstrips(lstrips(text, remove), remove)
def safeunicode(obj, encoding='utf-8'):
r"""
Converts any given object to unicode string.
>>> safeunicode('hello')
u'hello'
>>> safeunicode(2)
u'2'
>>> safeunicode('\xe1\x88\xb4')
u'\u1234'
"""
t = type(obj)
if t is text_type:
return obj
elif t is bytes:
return obj.decode(encoding)
elif t in [int, float, bool]:
return unicode(obj)
#elif hasattr(obj, '__unicode__') or isinstance(obj, unicode):
# return unicode(obj)
#else:
# return str(obj).decode(encoding)
else:
return unicode(obj)
def safestr(obj, encoding='utf-8'):
r"""
Converts any given object to utf-8 encoded string.
>>> safestr('hello')
'hello'
>>> safestr(2)
'2'
"""
if PY2 and isinstance(obj, unicode):
return obj.encode(encoding)
elif is_iter(obj):
return imap(safestr, obj)
else:
return str(obj)
if not PY2:
#Since Python3, utf-8 encoded strings and unicode strings are the same thing
safeunicode = safestr
def timelimit(timeout):
"""
A decorator to limit a function to `timeout` seconds, raising `RuntimeError`
if it takes longer.
>>> import time
>>> def meaningoflife():
... time.sleep(.2)
... return 42
>>>
>>> timelimit(.1)(meaningoflife)()
Traceback (most recent call last):
...
RuntimeError: took too long
>>> timelimit(1)(meaningoflife)()
42
_Caveat:_ The function isn't stopped after `timeout` seconds but continues
executing in a separate thread. (There seems to be no way to kill a thread.)
inspired by
"""
def _1(function):
def _2(*args, **kw):
class Dispatch(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.result = None
self.error = None
self.setDaemon(True)
self.start()
def run(self):
try:
self.result = function(*args, **kw)
except:
self.error = sys.exc_info()
c = Dispatch()
c.join(timeout)
if c.isAlive():
raise RuntimeError('took too long')
if c.error:
raise c.error[1]
return c.result
return _2
return _1
class Memoize:
"""
'Memoizes' a function, caching its return values for each input.
If `expires` is specified, values are recalculated after `expires` seconds.
If `background` is specified, values are recalculated in a separate thread.
>>> calls = 0
>>> def howmanytimeshaveibeencalled():
... global calls
... calls += 1
... return calls
>>> fastcalls = memoize(howmanytimeshaveibeencalled)
>>> howmanytimeshaveibeencalled()
1
>>> howmanytimeshaveibeencalled()
2
>>> fastcalls()
3
>>> fastcalls()
3
>>> import time
>>> fastcalls = memoize(howmanytimeshaveibeencalled, .1, background=False)
>>> fastcalls()
4
>>> fastcalls()
4
>>> time.sleep(.2)
>>> fastcalls()
5
>>> def slowfunc():
... time.sleep(.1)
... return howmanytimeshaveibeencalled()
>>> fastcalls = memoize(slowfunc, .2, background=True)
>>> fastcalls()
6
>>> timelimit(.05)(fastcalls)()
6
>>> time.sleep(.2)
>>> timelimit(.05)(fastcalls)()
6
>>> timelimit(.05)(fastcalls)()
6
>>> time.sleep(.2)
>>> timelimit(.05)(fastcalls)()
7
>>> fastcalls = memoize(slowfunc, None, background=True)
>>> threading.Thread(target=fastcalls).start()
>>> time.sleep(.01)
>>> fastcalls()
9
"""
def __init__(self, func, expires=None, background=True):
self.func = func
self.cache = {}
self.expires = expires
self.background = background
self.running = {}
def __call__(self, *args, **keywords):
key = (args, tuple(keywords.items()))
if not self.running.get(key):
self.running[key] = threading.Lock()
def update(block=False):
if self.running[key].acquire(block):
try:
self.cache[key] = (self.func(*args, **keywords), time.time())
finally:
self.running[key].release()
if key not in self.cache:
update(block=True)
elif self.expires and (time.time() - self.cache[key][1]) > self.expires:
if self.background:
threading.Thread(target=update).start()
else:
update()
return self.cache[key][0]
memoize = Memoize
re_compile = memoize(re.compile) #@@ threadsafe?
re_compile.__doc__ = """
A memoized version of re.compile.
"""
class _re_subm_proxy:
def __init__(self):
self.match = None
def __call__(self, match):
self.match = match
return ''
def re_subm(pat, repl, string):
"""
Like re.sub, but returns the replacement _and_ the match object.
>>> t, m = re_subm('g(oo+)fball', r'f\\1lish', 'goooooofball')
>>> t
'foooooolish'
>>> m.groups()
('oooooo',)
"""
compiled_pat = re_compile(pat)
proxy = _re_subm_proxy()
compiled_pat.sub(proxy.__call__, string)
return compiled_pat.sub(repl, string), proxy.match
def group(seq, size):
"""
Returns an iterator over a series of lists of length size from iterable.
>>> list(group([1,2,3,4], 2))
[[1, 2], [3, 4]]
>>> list(group([1,2,3,4,5], 2))
[[1, 2], [3, 4], [5]]
"""
def take(seq, n):
for i in range(n):
yield next(seq)
if not hasattr(seq, 'next'):
seq = iter(seq)
while True:
x = list(take(seq, size))
if x:
yield x
else:
break
def uniq(seq, key=None):
"""
Removes duplicate elements from a list while preserving the order of the rest.
>>> uniq([9,0,2,1,0])
[9, 0, 2, 1]
The value of the optional `key` parameter should be a function that
takes a single argument and returns a key to test the uniqueness.
>>> uniq(["Foo", "foo", "bar"], key=lambda s: s.lower())
['Foo', 'bar']
"""
key = key or (lambda x: x)
seen = set()
result = []
for v in seq:
k = key(v)
if k in seen:
continue
seen.add(k)
result.append(v)
return result
def iterview(x):
"""
Takes an iterable `x` and returns an iterator over it
which prints its progress to stderr as it iterates through.
"""
WIDTH = 70
def plainformat(n, lenx):
return '%5.1f%% (%*d/%d)' % ((float(n)/lenx)*100, len(str(lenx)), n, lenx)
def bars(size, n, lenx):
val = int((float(n)*size)/lenx + 0.5)
if size - val:
spacing = ">" + (" "*(size-val))[1:]
else:
spacing = ""
return "[%s%s]" % ("="*val, spacing)
def eta(elapsed, n, lenx):
if n == 0:
return '--:--:--'
if n == lenx:
secs = int(elapsed)
else:
secs = int((elapsed/n) * (lenx-n))
mins, secs = divmod(secs, 60)
hrs, mins = divmod(mins, 60)
return '%02d:%02d:%02d' % (hrs, mins, secs)
def format(starttime, n, lenx):
out = plainformat(n, lenx) + ' '
if n == lenx:
end = ' '
else:
end = ' ETA '
end += eta(time.time() - starttime, n, lenx)
out += bars(WIDTH - len(out) - len(end), n, lenx)
out += end
return out
starttime = time.time()
lenx = len(x)
for n, y in enumerate(x):
sys.stderr.write('\r' + format(starttime, n, lenx))
yield y
sys.stderr.write('\r' + format(starttime, n+1, lenx) + '\n')
class IterBetter:
"""
Returns an object that can be used as an iterator
but can also be used via __getitem__ (although it
cannot go backwards -- that is, you cannot request
`iterbetter[0]` after requesting `iterbetter[1]`).
>>> import itertools
>>> c = iterbetter(itertools.count())
>>> c[1]
1
>>> c[5]
5
>>> c[3]
Traceback (most recent call last):
...
IndexError: already passed 3
It is also possible to get the first value of the iterator or None.
>>> c = iterbetter(iter([3, 4, 5]))
>>> print(c.first())
3
>>> c = iterbetter(iter([]))
>>> print(c.first())
None
For boolean test, IterBetter peeks at the first value of the iterator without affecting the iteration.
>>> c = iterbetter(iter(range(5)))
>>> bool(c)
True
>>> list(c)
[0, 1, 2, 3, 4]
>>> c = iterbetter(iter([]))
>>> bool(c)
False
>>> list(c)
[]
"""
def __init__(self, iterator):
self.i, self.c = iterator, 0
def first(self, default=None):
"""Returns the first element of the iterator or None when there are no
elements.
If the optional argument default is specified, that is returned instead
of None when there are no elements.
"""
try:
return next(iter(self))
except StopIteration:
return default
def __iter__(self):
if hasattr(self, "_head"):
yield self._head
while 1:
yield next(self.i)
self.c += 1
def __getitem__(self, i):
#todo: slices
if i < self.c:
raise IndexError("already passed "+str(i))
try:
while i > self.c:
next(self.i)
self.c += 1
# now self.c == i
self.c += 1
return next(self.i)
except StopIteration:
raise IndexError(str(i))
def __nonzero__(self):
if hasattr(self, "__len__"):
return self.__len__() != 0
elif hasattr(self, "_head"):
return True
else:
try:
self._head = next(self.i)
except StopIteration:
return False
else:
return True
__bool__ = __nonzero__
iterbetter = IterBetter
def safeiter(it, cleanup=None, ignore_errors=True):
"""Makes an iterator safe by ignoring the exceptions that occur during the iteration.
"""
def _next(): # renamed so it doesn't shadow the builtin next()
while True:
try:
return next(it)
except StopIteration:
raise
except:
traceback.print_exc()
it = iter(it)
while True:
yield _next()
def safewrite(filename, content):
"""Writes the content to a temp file and then moves the temp file to
given filename to avoid overwriting the existing file in case of errors.
"""
f = open(filename + '.tmp', 'w')
f.write(content)
f.close()
os.rename(f.name, filename)
def dictreverse(mapping):
"""
Returns a new dictionary with keys and values swapped.
>>> dictreverse({1: 2, 3: 4})
{2: 1, 4: 3}
"""
return dict([(value, key) for (key, value) in iteritems(mapping)])
def dictfind(dictionary, element):
"""
Returns a key whose value in `dictionary` is `element`
or, if none exists, None.
>>> d = {1:2, 3:4}
>>> dictfind(d, 4)
3
>>> dictfind(d, 5)
"""
for (key, value) in iteritems(dictionary):
if element is value:
return key
def dictfindall(dictionary, element):
"""
Returns the keys whose values in `dictionary` are `element`
or, if none exists, [].
>>> d = {1:4, 3:4}
>>> dictfindall(d, 4)
[1, 3]
>>> dictfindall(d, 5)
[]
"""
res = []
for (key, value) in iteritems(dictionary):
if element is value:
res.append(key)
return res
def dictincr(dictionary, element):
"""
Increments `element` in `dictionary`,
setting it to one if it doesn't exist.
>>> d = {1:2, 3:4}
>>> dictincr(d, 1)
3
>>> d[1]
3
>>> dictincr(d, 5)
1
>>> d[5]
1
"""
dictionary.setdefault(element, 0)
dictionary[element] += 1
return dictionary[element]
def dictadd(*dicts):
"""
Returns a dictionary consisting of the keys in the argument dictionaries.
If they share a key, the value from the last argument is used.
>>> dictadd({1: 0, 2: 0}, {2: 1, 3: 1})
{1: 0, 2: 1, 3: 1}
"""
result = {}
for dct in dicts:
result.update(dct)
return result
def requeue(queue, index=-1):
"""Returns the element at index after moving it to the beginning of the queue.
>>> x = [1, 2, 3, 4]
>>> requeue(x)
4
>>> x
[4, 1, 2, 3]
"""
x = queue.pop(index)
queue.insert(0, x)
return x
def restack(stack, index=0):
"""Returns the element at index after moving it to the top of stack.
>>> x = [1, 2, 3, 4]
>>> restack(x)
1
>>> x
[2, 3, 4, 1]
"""
x = stack.pop(index)
stack.append(x)
return x
def listget(lst, ind, default=None):
"""
Returns `lst[ind]` if it exists, `default` otherwise.
>>> listget(['a'], 0)
'a'
>>> listget(['a'], 1)
>>> listget(['a'], 1, 'b')
'b'
"""
if len(lst)-1 < ind:
return default
return lst[ind]
def intget(integer, default=None):
"""
Returns `integer` as an int or `default` if it can't.
>>> intget('3')
3
>>> intget('3a')
>>> intget('3a', 0)
0
"""
try:
return int(integer)
except (TypeError, ValueError):
return default
def datestr(then, now=None):
"""
Converts a (UTC) datetime object to a nice string representation.
>>> from datetime import datetime, timedelta
>>> d = datetime(1970, 5, 1)
>>> datestr(d, now=d)
'0 microseconds ago'
>>> for t, v in iteritems({
... timedelta(microseconds=1): '1 microsecond ago',
... timedelta(microseconds=2): '2 microseconds ago',
... -timedelta(microseconds=1): '1 microsecond from now',
... -timedelta(microseconds=2): '2 microseconds from now',
... timedelta(microseconds=2000): '2 milliseconds ago',
... timedelta(seconds=2): '2 seconds ago',
... timedelta(seconds=2*60): '2 minutes ago',
... timedelta(seconds=2*60*60): '2 hours ago',
... timedelta(days=2): '2 days ago',
... }):
... assert datestr(d, now=d+t) == v
>>> datestr(datetime(1970, 1, 1), now=d)
'January 1'
>>> datestr(datetime(1969, 1, 1), now=d)
'January 1, 1969'
>>> datestr(datetime(1970, 6, 1), now=d)
'June 1, 1970'
>>> datestr(None)
''
"""
def agohence(n, what, divisor=None):
if divisor: n = n // divisor
out = str(abs(n)) + ' ' + what # '2 day'
if abs(n) != 1: out += 's' # '2 days'
out += ' ' # '2 days '
if n < 0:
out += 'from now'
else:
out += 'ago'
return out # '2 days ago'
oneday = 24 * 60 * 60
if not then: return ""
if not now: now = datetime.datetime.utcnow()
if type(now).__name__ == "DateTime":
now = datetime.datetime.fromtimestamp(now)
if type(then).__name__ == "DateTime":
then = datetime.datetime.fromtimestamp(then)
elif type(then).__name__ == "date":
then = datetime.datetime(then.year, then.month, then.day)
delta = now - then
deltaseconds = int(delta.days * oneday + delta.seconds + delta.microseconds * 1e-06)
deltadays = abs(deltaseconds) // oneday
if deltaseconds < 0: deltadays *= -1 # fix for oddity of floor
if deltadays:
if abs(deltadays) < 4:
return agohence(deltadays, 'day')
# Trick to display 'June 3' instead of 'June 03'
# Even though the %e format in strftime does that, it doesn't work on Windows.
out = then.strftime('%B %d').replace(" 0", " ")
if then.year != now.year or deltadays < 0:
out += ', %s' % then.year
return out
if int(deltaseconds):
if abs(deltaseconds) > (60 * 60):
return agohence(deltaseconds, 'hour', 60 * 60)
elif abs(deltaseconds) > 60:
return agohence(deltaseconds, 'minute', 60)
else:
return agohence(deltaseconds, 'second')
deltamicroseconds = delta.microseconds
if delta.days: deltamicroseconds = int(delta.microseconds - 1e6) # datetime oddity
if abs(deltamicroseconds) > 1000:
return agohence(deltamicroseconds, 'millisecond', 1000)
return agohence(deltamicroseconds, 'microsecond')
def numify(string):
"""
Removes all non-digit characters from `string`.
>>> numify('800-555-1212')
'8005551212'
>>> numify('800.555.1212')
'8005551212'
"""
return ''.join([c for c in str(string) if c.isdigit()])
def denumify(string, pattern):
"""
Formats `string` according to `pattern`, where the letter X gets replaced
by characters from `string`.
>>> denumify("8005551212", "(XXX) XXX-XXXX")
'(800) 555-1212'
"""
out = []
for c in pattern:
if c == "X":
out.append(string[0])
string = string[1:]
else:
out.append(c)
return ''.join(out)
def commify(n):
"""
Add commas to an integer `n`.
>>> commify(1)
'1'
>>> commify(123)
'123'
>>> commify(-123)
'-123'
>>> commify(1234)
'1,234'
>>> commify(1234567890)
'1,234,567,890'
>>> commify(123.0)
'123.0'
>>> commify(1234.5)
'1,234.5'
>>> commify(1234.56789)
'1,234.56789'
>>> commify(' %.2f ' % -1234.5)
'-1,234.50'
>>> commify(None)
>>>
"""
if n is None: return None
n = str(n).strip()
if n.startswith('-'):
prefix = '-'
n = n[1:].strip()
else:
prefix = ''
if '.' in n:
dollars, cents = n.split('.')
else:
dollars, cents = n, None
r = []
for i, c in enumerate(str(dollars)[::-1]):
if i and (not (i % 3)):
r.insert(0, ',')
r.insert(0, c)
out = ''.join(r)
if cents:
out += '.' + cents
return prefix + out
def dateify(datestring):
"""
Formats a numified `datestring` properly.
"""
return denumify(datestring, "XXXX-XX-XX XX:XX:XX")
def nthstr(n):
"""
Formats an ordinal.
Doesn't handle negative numbers.
>>> nthstr(1)
'1st'
>>> nthstr(0)
'0th'
>>> [nthstr(x) for x in [2, 3, 4, 5, 10, 11, 12, 13, 14, 15]]
['2nd', '3rd', '4th', '5th', '10th', '11th', '12th', '13th', '14th', '15th']
>>> [nthstr(x) for x in [91, 92, 93, 94, 99, 100, 101, 102]]
['91st', '92nd', '93rd', '94th', '99th', '100th', '101st', '102nd']
>>> [nthstr(x) for x in [111, 112, 113, 114, 115]]
['111th', '112th', '113th', '114th', '115th']
"""
assert n >= 0
if n % 100 in [11, 12, 13]: return '%sth' % n
return {1: '%sst', 2: '%snd', 3: '%srd'}.get(n % 10, '%sth') % n
def cond(predicate, consequence, alternative=None):
"""
Function replacement for if-else to use in expressions.
>>> x = 2
>>> cond(x % 2 == 0, "even", "odd")
'even'
>>> cond(x % 2 == 0, "even", "odd") + '_row'
'even_row'
"""
if predicate:
return consequence
else:
return alternative
class CaptureStdout:
"""
Captures everything `func` prints to stdout and returns it instead.
>>> def idiot():
... print("foo")
>>> capturestdout(idiot)()
'foo\\n'
**WARNING:** Not threadsafe!
"""
def __init__(self, func):
self.func = func
def __call__(self, *args, **keywords):
out = StringIO()
oldstdout = sys.stdout
sys.stdout = out
try:
self.func(*args, **keywords)
finally:
sys.stdout = oldstdout
return out.getvalue()
capturestdout = CaptureStdout
class Profile:
"""
Profiles `func` and returns a tuple containing its output
and a string with human-readable profiling information.
>>> import time
>>> out, inf = profile(time.sleep)(.001)
>>> out
>>> inf[:10].strip()
'took 0.0'
"""
def __init__(self, func):
self.func = func
def __call__(self, *args): ##, **kw): kw unused
import cProfile, pstats, os, tempfile ##, time already imported
f, filename = tempfile.mkstemp()
os.close(f)
prof = cProfile.Profile()
stime = time.time()
result = prof.runcall(self.func, *args)
stime = time.time() - stime
out = StringIO()
stats = pstats.Stats(prof, stream=out)
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats(40)
stats.print_callers()
x = '\n\ntook '+ str(stime) + ' seconds\n'
x += out.getvalue()
# remove the tempfile
try:
os.remove(filename)
except IOError:
pass
return result, x
profile = Profile
import traceback
# hack for compatibility with Python 2.3:
if not hasattr(traceback, 'format_exc'):
from cStringIO import StringIO
def format_exc(limit=None):
strbuf = StringIO()
traceback.print_exc(limit, strbuf)
return strbuf.getvalue()
traceback.format_exc = format_exc
def tryall(context, prefix=None):
"""
Tries a series of functions and prints their results.
`context` is a dictionary mapping names to values;
the value will only be tried if it's callable.
>>> tryall(dict(j=lambda: True))
j: True
----------------------------------------
results:
True: 1
For example, you might have a file `test/stuff.py`
with a series of functions testing various things in it.
At the bottom, have a line:
if __name__ == "__main__": tryall(globals())
Then you can run `python test/stuff.py` and get the results of
all the tests.
"""
context = context.copy() # vars() would update
results = {}
for (key, value) in iteritems(context):
if not hasattr(value, '__call__'):
continue
if prefix and not key.startswith(prefix):
continue
print(key + ':', end=" ")
try:
r = value()
dictincr(results, r)
print(r)
except:
print('ERROR')
dictincr(results, 'ERROR')
print(' ' + '\n '.join(traceback.format_exc().split('\n')))
print('-'*40)
print('results:')
for (key, value) in iteritems(results):
print(' '*2, str(key)+':', value)
class ThreadedDict(threadlocal):
"""
Thread local storage.
>>> d = ThreadedDict()
>>> d.x = 1
>>> d.x
1
>>> import threading
>>> def f(): d.x = 2
...
>>> t = threading.Thread(target=f)
>>> t.start()
>>> t.join()
>>> d.x
1
"""
_instances = set()
def __init__(self):
ThreadedDict._instances.add(self)
def __del__(self):
ThreadedDict._instances.remove(self)
def __hash__(self):
return id(self)
def clear_all():
"""Clears all ThreadedDict instances.
"""
for t in list(ThreadedDict._instances):
t.clear()
clear_all = staticmethod(clear_all)
# Define all these methods to more or less fully emulate dict -- attribute access
# is built into threading.local.
def __getitem__(self, key):
return self.__dict__[key]
def __setitem__(self, key, value):
self.__dict__[key] = value
def __delitem__(self, key):
del self.__dict__[key]
def __contains__(self, key):
return key in self.__dict__
has_key = __contains__
def clear(self):
self.__dict__.clear()
def copy(self):
return self.__dict__.copy()
def get(self, key, default=None):
return self.__dict__.get(key, default)
def items(self):
return self.__dict__.items()
def iteritems(self):
return iteritems(self.__dict__)
def keys(self):
return self.__dict__.keys()
def iterkeys(self):
return iter(self.__dict__.keys())
iter = iterkeys
def values(self):
return self.__dict__.values()
def itervalues(self):
return itervalues(self.__dict__)
def pop(self, key, *args):
return self.__dict__.pop(key, *args)
def popitem(self):
return self.__dict__.popitem()
def setdefault(self, key, default=None):
return self.__dict__.setdefault(key, default)
def update(self, *args, **kwargs):
self.__dict__.update(*args, **kwargs)
def __repr__(self):
return '<ThreadedDict %r>' % self.__dict__
__str__ = __repr__
threadeddict = ThreadedDict
def autoassign(self, locals):
"""
Automatically assigns local variables to `self`.
>>> self = storage()
>>> autoassign(self, dict(a=1, b=2))
>>> self.a
1
>>> self.b
2
Generally used in `__init__` methods, as in:
def __init__(self, foo, bar, baz=1): autoassign(self, locals())
"""
for (key, value) in iteritems(locals):
if key == 'self':
continue
setattr(self, key, value)
def to36(q):
"""
Converts an integer to base 36 (a useful scheme for human-sayable IDs).
>>> to36(35)
'z'
>>> to36(119292)
'2k1o'
>>> int(to36(939387374), 36)
939387374
>>> to36(0)
'0'
>>> to36(-393)
Traceback (most recent call last):
...
ValueError: must supply a positive integer
"""
if q < 0: raise ValueError("must supply a positive integer")
letters = "0123456789abcdefghijklmnopqrstuvwxyz"
converted = []
while q != 0:
q, r = divmod(q, 36)
converted.insert(0, letters[r])
return "".join(converted) or '0'
r_url = re_compile(r'(?<!\()(http://(\S+))')
def safemarkdown(text):
"""Converts text to HTML following the rules of Markdown, but blocking any
outside HTML input, so that only the things marked up as markdown will be
converted to HTML."""
from markdown import markdown
if text:
text = text.replace('<', '&lt;')
text = r_url.sub(r'<\1>', text)
text = markdown(text)
return text
def sendmail(from_address, to_address, subject, message, headers=None, **kw):
"""
Sends the email message `message` with mail and envelope headers
from `from_address` to `to_address` with `subject`.
Additional email headers can be specified with the dictionary
`headers`.
Optionally cc, bcc and attachments can be specified as keyword arguments.
Attachments must be an iterable and each attachment can be either a
filename or a file object or a dictionary with filename, content and
optionally content_type keys.
If `web.config.smtp_server` is set, it will send the message
to that SMTP server. Otherwise it will look for
`/usr/sbin/sendmail`, the typical location for the sendmail-style
binary. To use sendmail from a different path, set `web.config.sendmail_path`.
"""
attachments = kw.pop("attachments", [])
mail = _EmailMessage(from_address, to_address, subject, message, headers, **kw)
for a in attachments:
if isinstance(a, dict):
mail.attach(a['filename'], a['content'], a.get('content_type'))
elif hasattr(a, 'read'): # file
filename = os.path.basename(getattr(a, "name", ""))
content_type = getattr(a, 'content_type', None)
mail.attach(filename, a.read(), content_type)
elif isinstance(a, string_types):
f = open(a, 'rb')
content = f.read()
f.close()
filename = os.path.basename(a)
mail.attach(filename, content, None)
else:
raise ValueError("Invalid attachment: %s" % repr(a))
mail.send()
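# Usage sketch (illustrative): delivery settings are read from web.config at
# send time; the server, credentials, addresses and filename below are
# hypothetical.
#
#     import web
#     web.config.smtp_server = 'smtp.example.com'
#     web.config.smtp_port = 587
#     web.config.smtp_starttls = True
#     web.config.smtp_username = 'mailer'
#     web.config.smtp_password = 'secret'
#     web.sendmail('noreply@example.com', ['alice@example.com'],
#                  'Weekly report', 'See the attached file.',
#                  cc='bob@example.com', attachments=['report.pdf'])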
class _EmailMessage:
def __init__(self, from_address, to_address, subject, message, headers=None, **kw):
def listify(x):
if not isinstance(x, list):
return [safestr(x)]
else:
return [safestr(a) for a in x]
subject = safestr(subject)
message = safestr(message)
from_address = safestr(from_address)
to_address = listify(to_address)
cc = listify(kw.get('cc', []))
bcc = listify(kw.get('bcc', []))
recipients = to_address + cc + bcc
import email.utils
self.from_address = email.utils.parseaddr(from_address)[1]
self.recipients = [email.utils.parseaddr(r)[1] for r in recipients]
self.headers = dictadd({
'From': from_address,
'To': ", ".join(to_address),
'Subject': subject
}, headers or {})
if cc:
self.headers['Cc'] = ", ".join(cc)
self.message = self.new_message()
self.message.add_header("Content-Transfer-Encoding", "7bit")
self.message.add_header("Content-Disposition", "inline")
self.message.add_header("MIME-Version", "1.0")
self.message.set_payload(message, 'utf-8')
self.multipart = False
def new_message(self):
from email.message import Message
return Message()
def attach(self, filename, content, content_type=None):
if not self.multipart:
msg = self.new_message()
msg.add_header("Content-Type", "multipart/mixed")
msg.attach(self.message)
self.message = msg
self.multipart = True
import mimetypes
try:
from email import encoders
except:
from email import Encoders as encoders
content_type = content_type or mimetypes.guess_type(filename)[0] or "application/octet-stream"
msg = self.new_message()
msg.set_payload(content)
msg.add_header('Content-Type', content_type)
msg.add_header('Content-Disposition', 'attachment', filename=filename)
if not content_type.startswith("text/"):
encoders.encode_base64(msg)
self.message.attach(msg)
def prepare_message(self):
for k, v in iteritems(self.headers):
if k.lower() == "content-type":
self.message.set_type(v)
else:
self.message.add_header(k, v)
self.headers = {}
def send(self):
try:
from . import webapi
except ImportError:
webapi = Storage(config=Storage())
self.prepare_message()
message_text = self.message.as_string()
if webapi.config.get('smtp_server'):
server = webapi.config.get('smtp_server')
port = webapi.config.get('smtp_port', 0)
username = webapi.config.get('smtp_username')
password = webapi.config.get('smtp_password')
debug_level = webapi.config.get('smtp_debuglevel', None)
starttls = webapi.config.get('smtp_starttls', False)
import smtplib
smtpserver = smtplib.SMTP(server, port)
if debug_level:
smtpserver.set_debuglevel(debug_level)
if starttls:
smtpserver.ehlo()
smtpserver.starttls()
smtpserver.ehlo()
if username and password:
smtpserver.login(username, password)
smtpserver.sendmail(self.from_address, self.recipients, message_text)
smtpserver.quit()
elif webapi.config.get('email_engine') == 'aws':
import boto.ses
c = boto.ses.SESConnection(
aws_access_key_id=webapi.config.get('aws_access_key_id'),
aws_secret_access_key=webapi.config.get('aws_secret_access_key'))
c.send_raw_email(self.from_address, message_text, self.recipients)
else:
sendmail = webapi.config.get('sendmail_path', '/usr/sbin/sendmail')
assert not self.from_address.startswith('-'), 'security'
for r in self.recipients:
assert not r.startswith('-'), 'security'
cmd = [sendmail, '-f', self.from_address] + self.recipients
p = subprocess.Popen(cmd, stdin=subprocess.PIPE)
p.stdin.write(message_text.encode('utf-8'))
p.stdin.close()
p.wait()
def __repr__(self):
return "<EmailMessage>"
def __str__(self):
return self.message.as_string()
if __name__ == "__main__":
import doctest
doctest.testmod()
webpy/web/__init__.py 0000644 0001750 0001750 00000001367 13146625266 013250 0 ustar wmb wmb #!/usr/bin/env python
"""web.py: makes web apps (http://webpy.org)"""
from __future__ import generators
__version__ = "0.40-dev0"
__author__ = [
"Aaron Swartz <me@aaronsw.com>",
"Anand Chitipothu <anandology@gmail.com>"
]
__license__ = "public domain"
__contributors__ = "see http://webpy.org/changes"
from . import utils, db, net, wsgi, http, webapi, httpserver, debugerror
from . import template, form
from . import session
from .utils import *
from .db import *
from .net import *
from .wsgi import *
from .http import *
from .webapi import *
from .httpserver import *
from .debugerror import *
from .application import *
#from browser import *
try:
from . import webopenid as openid
except ImportError:
pass # requires openid module
webpy/web/browser.py 0000644 0001750 0001750 00000021012 13146625266 013161 0 ustar wmb wmb """Browser to test web applications.
(from web.py)
"""
from .utils import re_compile
from .net import htmlunquote
from io import BytesIO, StringIO
import copy
from .py3helpers import PY2
#Welcome to the Py2->Py3 httplib/urllib reorganization nightmare.
if PY2:
get_selector = lambda x: x.get_selector()
get_host = lambda x: x.get_host()
get_data = lambda x: x.get_data()
get_type = lambda x: x.get_type()
else:
get_selector = lambda x: x.selector
get_host = lambda x: x.host
get_data = lambda x: x.data
get_type = lambda x: x.type
try: #Py3
from http.client import HTTPMessage
from urllib.request import HTTPHandler, HTTPCookieProcessor, build_opener, Request, HTTPError
from urllib.request import build_opener as urllib_build_opener
from urllib.parse import urljoin
from http.cookiejar import CookieJar
from urllib.response import addinfourl
except ImportError: #Py2
from httplib import HTTPMessage
from urllib import addinfourl
from urllib2 import HTTPHandler, HTTPCookieProcessor, Request, HTTPError
from urllib2 import build_opener as urllib_build_opener
from cookielib import CookieJar
from urlparse import urljoin
DEBUG = False
__all__ = [
"BrowserError",
"Browser", "AppBrowser",
"AppHandler"
]
class BrowserError(Exception):
pass
class Browser:
def __init__(self):
self.cookiejar = CookieJar()
self._cookie_processor = HTTPCookieProcessor(self.cookiejar)
self.form = None
self.url = "http://0.0.0.0:8080/"
self.path = "/"
self.status = None
self.data = None
self._response = None
self._forms = None
def reset(self):
"""Clears all cookies and history."""
self.cookiejar.clear()
def build_opener(self):
"""Builds the opener using (urllib2/urllib.request).build_opener.
Subclasses can override this function to provide custom openers.
"""
return urllib_build_opener()
def do_request(self, req):
if DEBUG:
print('requesting', req.get_method(), req.get_full_url())
opener = self.build_opener()
opener.add_handler(self._cookie_processor)
try:
self._response = opener.open(req)
except HTTPError as e:
self._response = e
self.url = self._response.geturl()
self.path = get_selector(Request(self.url))
self.data = self._response.read()
self.status = self._response.code
self._forms = None
self.form = None
return self.get_response()
def open(self, url, data=None, headers={}):
"""Opens the specified url."""
url = urljoin(self.url, url)
req = Request(url, data, headers)
return self.do_request(req)
def show(self):
"""Opens the current page in real web browser."""
f = open('page.html', 'w')
f.write(self.data)
f.close()
import webbrowser, os
url = 'file://' + os.path.abspath('page.html')
webbrowser.open(url)
def get_response(self):
"""Returns a copy of the current response."""
return addinfourl(BytesIO(self.data), self._response.info(), self._response.geturl())
def get_soup(self):
"""Returns beautiful soup of the current document."""
import BeautifulSoup
return BeautifulSoup.BeautifulSoup(self.data)
def get_text(self, e=None):
"""Returns content of e or the current document as plain text."""
e = e or self.get_soup()
return ''.join([htmlunquote(c) for c in e.recursiveChildGenerator() if isinstance(c, unicode)])
def _get_links(self):
soup = self.get_soup()
return [a for a in soup.findAll(name='a')]
def get_links(self, text=None, text_regex=None, url=None, url_regex=None, predicate=None):
"""Returns all links in the document."""
return self._filter_links(self._get_links(),
text=text, text_regex=text_regex, url=url, url_regex=url_regex, predicate=predicate)
def follow_link(self, link=None, text=None, text_regex=None, url=None, url_regex=None, predicate=None):
if link is None:
links = self._filter_links(self.get_links(),
text=text, text_regex=text_regex, url=url, url_regex=url_regex, predicate=predicate)
link = links and links[0]
if link:
return self.open(link['href'])
else:
raise BrowserError("No link found")
def find_link(self, text=None, text_regex=None, url=None, url_regex=None, predicate=None):
links = self._filter_links(self.get_links(),
text=text, text_regex=text_regex, url=url, url_regex=url_regex, predicate=predicate)
return links and links[0] or None
def _filter_links(self, links,
text=None, text_regex=None,
url=None, url_regex=None,
predicate=None):
predicates = []
if text is not None:
predicates.append(lambda link: link.string == text)
if text_regex is not None:
predicates.append(lambda link: re_compile(text_regex).search(link.string or ''))
if url is not None:
predicates.append(lambda link: link.get('href') == url)
if url_regex is not None:
predicates.append(lambda link: re_compile(url_regex).search(link.get('href', '')))
if predicate:
predicates.append(predicate)
def f(link):
for p in predicates:
if not p(link):
return False
return True
return [link for link in links if f(link)]
def get_forms(self):
"""Returns all forms in the current document.
The returned form objects implement the ClientForm.HTMLForm interface.
"""
if self._forms is None:
import ClientForm
self._forms = ClientForm.ParseResponse(self.get_response(), backwards_compat=False)
return self._forms
def select_form(self, name=None, predicate=None, index=0):
"""Selects the specified form."""
forms = self.get_forms()
if name is not None:
forms = [f for f in forms if f.name == name]
if predicate:
forms = [f for f in forms if predicate(f)]
if forms:
self.form = forms[index]
return self.form
else:
raise BrowserError("No form selected.")
def submit(self, **kw):
"""submits the currently selected form."""
if self.form is None:
raise BrowserError("No form selected.")
req = self.form.click(**kw)
return self.do_request(req)
def __getitem__(self, key):
return self.form[key]
def __setitem__(self, key, value):
self.form[key] = value
class AppBrowser(Browser):
"""Browser interface to test web.py apps.
b = AppBrowser(app)
b.open('/')
b.follow_link(text='Login')
b.select_form(name='login')
b['username'] = 'joe'
b['password'] = 'secret'
b.submit()
assert b.path == '/'
assert 'Welcome joe' in b.get_text()
"""
def __init__(self, app):
Browser.__init__(self)
self.app = app
def build_opener(self):
return urllib_build_opener(AppHandler(self.app))
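# Usage sketch (illustrative): AppBrowser drives a web.application in-process
# through AppHandler below, so no socket is opened. The urls mapping and the
# index handler are hypothetical.
#
#     import web
#     urls = ('/', 'index')
#     app = web.application(urls, globals())
#     b = AppBrowser(app)
#     b.open('/')
#     assert b.status == 200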
class AppHandler(HTTPHandler):
"""urllib2 handler to handle requests using web.py application."""
handler_order = 100
def __init__(self, app):
self.app = app
def http_open(self, req):
result = self.app.request(
localpart=get_selector(req),
method=req.get_method(),
host=get_host(req),
data=get_data(req),
headers=dict(req.header_items()),
https=get_type(req) == "https"
)
return self._make_response(result, req.get_full_url())
def https_open(self, req):
return self.http_open(req)
try:
https_request = HTTPHandler.do_request_
except AttributeError:
# for python 2.3
pass
def _make_response(self, result, url):
data = "\r\n".join(["%s: %s" % (k, v) for k, v in result.header_items])
if PY2:
headers = HTTPMessage(BytesIO(data))
else:
import email
headers = email.message_from_string(data)
response = addinfourl(BytesIO(result.data), headers, url)
code, msg = result.status.split(None, 1)
response.code, response.msg = int(code), msg
return response
webpy/web/contrib/ 0000755 0001750 0001750 00000000000 13146625266 012570 5 ustar wmb wmb webpy/web/contrib/__init__.py 0000644 0001750 0001750 00000000000 13146625266 014667 0 ustar wmb wmb webpy/web/contrib/template.py 0000644 0001750 0001750 00000006571 13146625266 014766 0 ustar wmb wmb """
Interface to various templating engines.
"""
import os.path
__all__ = [
"render_cheetah", "render_genshi", "render_mako",
"cache",
]
class render_cheetah:
"""Rendering interface to Cheetah Templates.
Example:
render = render_cheetah('templates')
render.hello(name="cheetah")
"""
def __init__(self, path):
# give error if Cheetah is not installed
from Cheetah.Template import Template
self.path = path
def __getattr__(self, name):
from Cheetah.Template import Template
path = os.path.join(self.path, name + ".html")
def template(**kw):
t = Template(file=path, searchList=[kw])
return t.respond()
return template
class render_genshi:
"""Rendering interface to Genshi templates.
Example:
for xml/html templates.
render = render_genshi(['templates/'])
render.hello(name='genshi')
For text templates:
render = render_genshi(['templates/'], type='text')
render.hello(name='genshi')
"""
def __init__(self, *a, **kwargs):
from genshi.template import TemplateLoader
self._type = kwargs.pop('type', None)
self._loader = TemplateLoader(*a, **kwargs)
def __getattr__(self, name):
# Assuming all templates are html
path = name + ".html"
if self._type == "text":
from genshi.template import TextTemplate
cls = TextTemplate
type = "text"
else:
cls = None
type = None
t = self._loader.load(path, cls=cls)
def template(**kw):
stream = t.generate(**kw)
if type:
return stream.render(type)
else:
return stream.render()
return template
class render_jinja:
"""Rendering interface to Jinja2 Templates
Example:
render= render_jinja('templates')
render.hello(name='jinja2')
"""
def __init__(self, *a, **kwargs):
extensions = kwargs.pop('extensions', [])
globals = kwargs.pop('globals', {})
from jinja2 import Environment,FileSystemLoader
self._lookup = Environment(loader=FileSystemLoader(*a, **kwargs), extensions=extensions)
self._lookup.globals.update(globals)
def __getattr__(self, name):
# Assuming all templates end with .html
path = name + '.html'
t = self._lookup.get_template(path)
return t.render
class render_mako:
"""Rendering interface to Mako Templates.
Example:
render = render_mako(directories=['templates'])
render.hello(name="mako")
"""
def __init__(self, *a, **kwargs):
from mako.lookup import TemplateLookup
self._lookup = TemplateLookup(*a, **kwargs)
def __getattr__(self, name):
# Assuming all templates are html
path = name + ".html"
t = self._lookup.get_template(path)
return t.render
class cache:
"""Cache for any rendering interface.
Example:
render = cache(render_cheetah("templates/"))
render.hello(name='cache')
"""
def __init__(self, render):
self._render = render
self._cache = {}
def __getattr__(self, name):
if name not in self._cache:
self._cache[name] = getattr(self._render, name)
return self._cache[name]
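# A short sketch of composing the wrappers above: attribute access on a
# renderer resolves to the template of the same name, and `cache` memoises
# that lookup so each template is located and loaded only once. The
# 'templates' directory and 'hello.html' file are assumed to exist.
def _example_cached_jinja():
    render = cache(render_jinja('templates'))
    return render.hello(name='jinja2')   # renders templates/hello.html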
webpy/web/webopenid.py 0000644 0001750 0001750 00000007173 13146625266 013466 0 ustar wmb wmb """openid.py: an openid library for web.py
Notes:
- This will create a file called .openid_secret_key in the
current directory with your secret key in it. If someone
has access to this file they can log in as any user. And
if the app can't find this file for any reason (e.g. you
moved the app somewhere else) then each currently logged
in user will get logged out.
- State must be maintained through the entire auth process
-- this means that if you have multiple web.py processes
serving one set of URLs or if you restart your app often
then logins will fail. You have to replace sessions and
store for things to work.
- We set cookies starting with "openid_".
"""
import os
import random
import hmac
import __init__ as web
import openid.consumer.consumer
import openid.store.memstore
sessions = {}
store = openid.store.memstore.MemoryStore()
def _secret():
try:
secret = file('.openid_secret_key').read()
except IOError:
# file doesn't exist
secret = os.urandom(20)
file('.openid_secret_key', 'w').write(secret)
return secret
def _hmac(identity_url):
return hmac.new(_secret(), identity_url).hexdigest()
def _random_session():
n = random.random()
while n in sessions:
n = random.random()
n = str(n)
return n
def status():
oid_hash = web.cookies().get('openid_identity_hash', '').split(',', 1)
if len(oid_hash) > 1:
oid_hash, identity_url = oid_hash
if oid_hash == _hmac(identity_url):
return identity_url
return None
def form(openid_loc):
oid = status()
if oid:
return '''
<form method="post" action="%s">
<strong>%s</strong>
<input type="hidden" name="action" value="logout" />
<input type="hidden" name="return_to" value="%s" />
<button type="submit">log out</button>
</form>''' % (openid_loc, oid, web.ctx.fullpath)
else:
return '''
<form method="post" action="%s">
<input type="text" name="openid" value="" />
<input type="hidden" name="return_to" value="%s" />
<button type="submit">log in</button>
</form>''' % (openid_loc, web.ctx.fullpath)
def logout():
web.setcookie('openid_identity_hash', '', expires=-1)
class host:
def POST(self):
# unlike the usual scheme of things, the POST is actually called
# first here
i = web.input(return_to='/')
if i.get('action') == 'logout':
logout()
return web.redirect(i.return_to)
i = web.input('openid', return_to='/')
n = _random_session()
sessions[n] = {'webpy_return_to': i.return_to}
c = openid.consumer.consumer.Consumer(sessions[n], store)
a = c.begin(i.openid)
f = a.redirectURL(web.ctx.home, web.ctx.home + web.ctx.fullpath)
web.setcookie('openid_session_id', n)
return web.redirect(f)
def GET(self):
n = web.cookies('openid_session_id').openid_session_id
web.setcookie('openid_session_id', '', expires=-1)
return_to = sessions[n]['webpy_return_to']
c = openid.consumer.consumer.Consumer(sessions[n], store)
a = c.complete(web.input(), web.ctx.home + web.ctx.fullpath)
if a.status.lower() == 'success':
web.setcookie('openid_identity_hash', _hmac(a.identity_url) + ',' + a.identity_url)
del sessions[n]
return web.redirect(return_to)
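# A minimal sketch of wiring this module into an application: map a URL to the
# `host` class above, render form() somewhere in a page and use status() to see
# which OpenID (if any) the visitor is logged in with. The URL layout and the
# index class are illustrative, not part of this module.
def _example_openid_app():
    import web
    import web.webopenid

    urls = ("/openid", "web.webopenid.host",
            "/", "index")

    class index:
        def GET(self):
            oid = web.webopenid.status()           # identity URL or None
            login_box = web.webopenid.form("/openid")
            return login_box + (oid or "anonymous")

    return web.application(urls, locals())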
webpy/web/db.py 0000644 0001750 0001750 00000126553 13146625266 012103 0 ustar wmb wmb """
Database API
(part of web.py)
"""
from __future__ import print_function
from .utils import threadeddict, storage, iters, iterbetter, safestr, safeunicode
import datetime, time, os, urllib, re
from .py3helpers import PY2, string_types, numeric_types, iteritems
try:
from urllib import parse as urlparse
from urllib.parse import unquote
except ImportError:
import urlparse
from urllib import unquote
try:
# db module can work independent of web.py
from .webapi import debug, config
except:
import sys
debug = sys.stderr
config = storage()
__all__ = [
"UnknownParamstyle", "UnknownDB", "TransactionError",
"sqllist", "sqlors", "reparam", "sqlquote",
"SQLQuery", "SQLParam", "sqlparam",
"SQLLiteral", "sqlliteral",
"database", 'DB',
]
TOKEN = '[ \\f\\t]*(\\\\\\r?\\n[ \\f\\t]*)*(#[^\\r\\n]*)?(((\\d+[jJ]|((\\d+\\.\\d*|\\.\\d+)([eE][-+]?\\d+)?|\\d+[eE][-+]?\\d+)[jJ])|((\\d+\\.\\d*|\\.\\d+)([eE][-+]?\\d+)?|\\d+[eE][-+]?\\d+)|(0[xX][\\da-fA-F]+[lL]?|0[bB][01]+[lL]?|(0[oO][0-7]+)|(0[0-7]*)[lL]?|[1-9]\\d*[lL]?))|((\\*\\*=?|>>=?|<<=?|<>|!=|//=?|[+\\-*/%&|^=<>]=?|~)|[][(){}]|(\\r?\\n|[:;.,`@]))|([uUbB]?[rR]?\'[^\\n\'\\\\]*(?:\\\\.[^\\n\'\\\\]*)*\'|[uUbB]?[rR]?"[^\\n"\\\\]*(?:\\\\.[^\\n"\\\\]*)*")|[a-zA-Z_]\\w*)'
tokenprog = re.compile(TOKEN)
class UnknownDB(Exception):
"""raised for unsupported dbms"""
pass
class _ItplError(ValueError):
def __init__(self, text, pos):
ValueError.__init__(self)
self.text = text
self.pos = pos
def __str__(self):
return "unfinished expression in %s at char %d" % (
repr(self.text), self.pos)
class TransactionError(Exception): pass
class UnknownParamstyle(Exception):
"""
raised for unsupported db paramstyles
(currently supported: qmark, numeric, format, pyformat)
"""
pass
class SQLParam(object):
"""
Parameter in SQLQuery.
>>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam("joe")])
>>> q
<sql: "SELECT * FROM test WHERE name='joe'">
>>> q.query()
'SELECT * FROM test WHERE name=%s'
>>> q.values()
['joe']
"""
__slots__ = ["value"]
def __init__(self, value):
self.value = value
def get_marker(self, paramstyle='pyformat'):
if paramstyle == 'qmark':
return '?'
elif paramstyle == 'numeric':
return ':1'
elif paramstyle is None or paramstyle in ['format', 'pyformat']:
return '%s'
raise UnknownParamstyle(paramstyle)
def sqlquery(self):
return SQLQuery([self])
def __add__(self, other):
return self.sqlquery() + other
def __radd__(self, other):
return other + self.sqlquery()
def __str__(self):
return str(self.value)
def __repr__(self):
return '<param: %s>' % repr(self.value)
sqlparam = SQLParam
class SQLQuery(object):
"""
You can pass this sort of thing as a clause in any db function.
Otherwise, you can pass a dictionary to the keyword argument `vars`
and the function will call reparam for you.
Internally, consists of `items`, which is a list of strings and
SQLParams, which get concatenated to produce the actual query.
"""
__slots__ = ["items"]
# tested in sqlquote's docstring
def __init__(self, items=None):
r"""Creates a new SQLQuery.
>>> SQLQuery("x")
>>> q = SQLQuery(['SELECT * FROM ', 'test', ' WHERE x=', SQLParam(1)])
>>> q
>>> q.query(), q.values()
('SELECT * FROM test WHERE x=%s', [1])
>>> SQLQuery(SQLParam(1))
"""
if items is None:
self.items = []
elif isinstance(items, list):
self.items = items
elif isinstance(items, SQLParam):
self.items = [items]
elif isinstance(items, SQLQuery):
self.items = list(items.items)
else:
self.items = [items]
# Take care of SQLLiterals
for i, item in enumerate(self.items):
if isinstance(item, SQLParam) and isinstance(item.value, SQLLiteral):
self.items[i] = item.value.v
def append(self, value):
self.items.append(value)
def __add__(self, other):
if isinstance(other, string_types):
items = [other]
elif isinstance(other, SQLQuery):
items = other.items
else:
return NotImplemented
return SQLQuery(self.items + items)
def __radd__(self, other):
if isinstance(other, string_types):
items = [other]
else:
return NotImplemented
return SQLQuery(items + self.items)
def __iadd__(self, other):
if isinstance(other, (string_types, SQLParam)):
self.items.append(other)
elif isinstance(other, SQLQuery):
self.items.extend(other.items)
else:
return NotImplemented
return self
def __len__(self):
return len(self.query())
def query(self, paramstyle=None):
"""
Returns the query part of the sql query.
>>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')])
>>> q.query()
'SELECT * FROM test WHERE name=%s'
>>> q.query(paramstyle='qmark')
'SELECT * FROM test WHERE name=?'
"""
s = []
for x in self.items:
if isinstance(x, SQLParam):
x = x.get_marker(paramstyle)
s.append(safestr(x))
else:
x = safestr(x)
# automatically escape % characters in the query
# For backward compatibility, ignore escaping when the query already looks escaped
if paramstyle in ['format', 'pyformat']:
if '%' in x and '%%' not in x:
x = x.replace('%', '%%')
s.append(x)
return "".join(s)
def values(self):
"""
Returns the values of the parameters used in the sql query.
>>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')])
>>> q.values()
['joe']
"""
return [i.value for i in self.items if isinstance(i, SQLParam)]
def join(items, sep=' ', prefix=None, suffix=None, target=None):
"""
Joins multiple queries.
>>> SQLQuery.join(['a', 'b'], ', ')
<sql: 'a, b'>
Optionally, prefix and suffix arguments can be provided.
>>> SQLQuery.join(['a', 'b'], ', ', prefix='(', suffix=')')
<sql: '(a, b)'>
If the target argument is provided, the items are appended to target instead of creating a new SQLQuery.
"""
if target is None:
target = SQLQuery()
target_items = target.items
if prefix:
target_items.append(prefix)
for i, item in enumerate(items):
if i != 0:
target_items.append(sep)
if isinstance(item, SQLQuery):
target_items.extend(item.items)
else:
target_items.append(item)
if suffix:
target_items.append(suffix)
return target
join = staticmethod(join)
def _str(self):
try:
return self.query() % tuple([sqlify(x) for x in self.values()])
except (ValueError, TypeError):
return self.query()
def __str__(self):
return safestr(self._str())
def __unicode__(self):
return safeunicode(self._str())
def __repr__(self):
return '<sql: %s>' % repr(str(self))
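# A small sketch of how SQLQuery and SQLParam compose: plain strings are kept
# verbatim while parameters stay as SQLParam objects until query()/values()
# split them back out for the DB-API driver. Table and column names are made up.
def _example_sqlquery():
    q = SQLQuery(["SELECT * FROM users WHERE name = ", SQLParam("joe")])
    q += " AND age > " + SQLParam(18).sqlquery()
    assert q.query() == "SELECT * FROM users WHERE name = %s AND age > %s"
    assert q.query(paramstyle='qmark') == "SELECT * FROM users WHERE name = ? AND age > ?"
    assert q.values() == ["joe", 18]
    return q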
class SQLLiteral:
"""
Protects a string from `sqlquote`.
>>> sqlquote('NOW()')
<sql: "'NOW()'">
>>> sqlquote(SQLLiteral('NOW()'))
<sql: 'NOW()'>
"""
def __init__(self, v):
self.v = v
def __repr__(self):
return self.v
sqlliteral = SQLLiteral
def _sqllist(values):
"""
>>> _sqllist([1, 2, 3])
<sql: '(1, 2, 3)'>
"""
items = []
items.append('(')
for i, v in enumerate(values):
if i != 0:
items.append(', ')
items.append(sqlparam(v))
items.append(')')
return SQLQuery(items)
def reparam(string_, dictionary):
"""
Takes a string and a dictionary and interpolates the string
using values from the dictionary. Returns an `SQLQuery` for the result.
>>> reparam("s = $s", dict(s=True))
>>> reparam("s IN $s", dict(s=[1, 2]))
"""
dictionary = dictionary.copy() # eval mucks with it
# disable builtins to avoid the risk of remote code execution.
dictionary['__builtins__'] = object()
vals = []
result = []
for live, chunk in _interpolate(string_):
if live:
v = eval(chunk, dictionary)
result.append(sqlquote(v))
else:
result.append(chunk)
return SQLQuery.join(result, '')
def sqlify(obj):
"""
converts `obj` to its proper SQL version
>>> sqlify(None)
'NULL'
>>> sqlify(True)
"'t'"
>>> sqlify(3)
'3'
"""
# because `1 == True and hash(1) == hash(True)`
# we have to do this the hard way...
if obj is None:
return 'NULL'
elif obj is True:
return "'t'"
elif obj is False:
return "'f'"
elif isinstance(obj, numeric_types):
return str(obj)
elif isinstance(obj, datetime.datetime):
return repr(obj.isoformat())
else:
if PY2 and isinstance(obj, unicode): #Strings are always UTF8 in Py3
obj = obj.encode('utf8')
return repr(obj)
def sqllist(lst):
"""
Converts the arguments for use in something like a WHERE clause.
>>> sqllist(['a', 'b'])
'a, b'
>>> sqllist('a')
'a'
"""
if isinstance(lst, string_types):
return lst
else:
return ', '.join(lst)
def sqlors(left, lst):
"""
`left` is a SQL clause like `tablename.arg = `
and `lst` is a list of values. Returns an SQLQuery
that ORs together `left` with each item in `lst`.
>>> sqlors('foo = ', [])
<sql: '1=2'>
>>> sqlors('foo = ', [1])
<sql: 'foo = 1'>
>>> sqlors('foo = ', 1)
<sql: 'foo = 1'>
>>> sqlors('foo = ', [1,2,3])
<sql: '(foo = 1 OR foo = 2 OR foo = 3 OR 1=2)'>
"""
if isinstance(lst, iters):
lst = list(lst)
ln = len(lst)
if ln == 0:
return SQLQuery("1=2")
if ln == 1:
lst = lst[0]
if isinstance(lst, iters):
return SQLQuery(['('] +
sum([[left, sqlparam(x), ' OR '] for x in lst], []) +
['1=2)']
)
else:
return left + sqlparam(lst)
def sqlwhere(data, grouping=' AND '):
"""
Converts a two-tuple (key, value) iterable `data` to an SQL WHERE clause `SQLQuery`.
>>> sqlwhere((('cust_id', 2), ('order_id', 3)))
<sql: 'cust_id = 2 AND order_id = 3'>
>>> sqlwhere((('order_id', 3), ('cust_id', 2)), grouping=', ')
<sql: 'order_id = 3, cust_id = 2'>
>>> sqlwhere((('a', 'a'), ('b', 'b'))).query()
'a = %s AND b = %s'
"""
return SQLQuery.join([k + ' = ' + sqlparam(v) for k, v in data], grouping)
def sqlquote(a):
"""
Ensures `a` is quoted properly for use in a SQL query.
>>> 'WHERE x = ' + sqlquote(True) + ' AND y = ' + sqlquote(3)
<sql: "WHERE x = 't' AND y = 3">
>>> 'WHERE x = ' + sqlquote(True) + ' AND y IN ' + sqlquote([2, 3])
<sql: "WHERE x = 't' AND y IN (2, 3)">
"""
if isinstance(a, list):
return _sqllist(a)
else:
return sqlparam(a).sqlquery()
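# A short sketch of the helpers above working together to build a parameterised
# WHERE clause without interpolating user data into the SQL text. The column
# names are made up.
def _example_where_helpers():
    clause = reparam("name = $name", {"name": "joe"})
    clause += " AND " + sqlors("status = ", ["new", "open"])
    assert clause.query() == "name = %s AND (status = %s OR status = %s OR 1=2)"
    assert clause.values() == ["joe", "new", "open"]

    pairs = sqlwhere([("cust_id", 2), ("order_id", 3)])
    assert pairs.query() == "cust_id = %s AND order_id = %s"
    return clause, pairs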
class Transaction:
"""Database transaction."""
def __init__(self, ctx):
self.ctx = ctx
self.transaction_count = transaction_count = len(ctx.transactions)
class transaction_engine:
"""Transaction Engine used in top level transactions."""
def do_transact(self):
ctx.commit(unload=False)
def do_commit(self):
ctx.commit()
def do_rollback(self):
ctx.rollback()
class subtransaction_engine:
"""Transaction Engine used in sub transactions."""
def query(self, q):
db_cursor = ctx.db.cursor()
ctx.db_execute(db_cursor, SQLQuery(q % transaction_count))
def do_transact(self):
self.query('SAVEPOINT webpy_sp_%s')
def do_commit(self):
self.query('RELEASE SAVEPOINT webpy_sp_%s')
def do_rollback(self):
self.query('ROLLBACK TO SAVEPOINT webpy_sp_%s')
class dummy_engine:
"""Transaction Engine used instead of subtransaction_engine
when sub transactions are not supported."""
do_transact = do_commit = do_rollback = lambda self: None
if self.transaction_count:
# nested transactions are not supported in some databases
if self.ctx.get('ignore_nested_transactions'):
self.engine = dummy_engine()
else:
self.engine = subtransaction_engine()
else:
self.engine = transaction_engine()
self.engine.do_transact()
self.ctx.transactions.append(self)
def __enter__(self):
return self
def __exit__(self, exctype, excvalue, traceback):
if exctype is not None:
self.rollback()
else:
self.commit()
def commit(self):
if len(self.ctx.transactions) > self.transaction_count:
self.engine.do_commit()
self.ctx.transactions = self.ctx.transactions[:self.transaction_count]
def rollback(self):
if len(self.ctx.transactions) > self.transaction_count:
self.engine.do_rollback()
self.ctx.transactions = self.ctx.transactions[:self.transaction_count]
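# A minimal sketch of Transaction in use via DB.transaction() (defined below).
# The outer `with` block uses transaction_engine, which commits on success and
# rolls back on error; the nested block uses subtransaction_engine, so only the
# inner work is undone via its SAVEPOINT. The `db` handle and the orders table
# are assumed to come from web.database() elsewhere.
def _example_transactions(db):
    with db.transaction():
        db.insert('orders', customer='joe')
        try:
            with db.transaction():                 # nested -> SAVEPOINT
                db.insert('orders', customer='bad-data')
                raise ValueError("undo only the inner insert")
        except ValueError:
            pass                                   # inner insert rolled back
    # outer transaction committed here; the 'joe' row is persisted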
class DB:
"""Database"""
def __init__(self, db_module, keywords):
"""Creates a database.
"""
# some DB implementations take an optional parameter `driver` to use a specific driver module
# but it should not be passed to connect
keywords.pop('driver', None)
self.db_module = db_module
self.keywords = keywords
self._ctx = threadeddict()
# flag to enable/disable printing queries
self.printing = config.get('debug_sql', config.get('debug', False))
self.supports_multiple_insert = False
try:
import DBUtils
# enable pooling if DBUtils module is available.
self.has_pooling = True
except ImportError:
self.has_pooling = False
# Pooling can be disabled by passing pooling=False in the keywords.
self.has_pooling = self.keywords.pop('pooling', True) and self.has_pooling
def _getctx(self):
if not self._ctx.get('db'):
self._load_context(self._ctx)
return self._ctx
ctx = property(_getctx)
def _load_context(self, ctx):
ctx.dbq_count = 0
ctx.transactions = [] # stack of transactions
if self.has_pooling:
ctx.db = self._connect_with_pooling(self.keywords)
else:
ctx.db = self._connect(self.keywords)
ctx.db_execute = self._db_execute
if not hasattr(ctx.db, 'commit'):
ctx.db.commit = lambda: None
if not hasattr(ctx.db, 'rollback'):
ctx.db.rollback = lambda: None
def commit(unload=True):
# do db commit and release the connection if pooling is enabled.
ctx.db.commit()
if unload and self.has_pooling:
self._unload_context(self._ctx)
def rollback():
# do db rollback and release the connection if pooling is enabled.
ctx.db.rollback()
if self.has_pooling:
self._unload_context(self._ctx)
ctx.commit = commit
ctx.rollback = rollback
def _unload_context(self, ctx):
del ctx.db
def _connect(self, keywords):
return self.db_module.connect(**keywords)
def _connect_with_pooling(self, keywords):
def get_pooled_db():
from DBUtils import PooledDB
# In DBUtils 0.9.3, `dbapi` argument is renamed as `creator`
# see Bug#122112
if PooledDB.__version__.split('.') < '0.9.3'.split('.'):
return PooledDB.PooledDB(dbapi=self.db_module, **keywords)
else:
return PooledDB.PooledDB(creator=self.db_module, **keywords)
if getattr(self, '_pooleddb', None) is None:
self._pooleddb = get_pooled_db()
return self._pooleddb.connection()
def _db_cursor(self):
return self.ctx.db.cursor()
def _param_marker(self):
"""Returns parameter marker based on paramstyle attribute if this database."""
style = getattr(self, 'paramstyle', 'pyformat')
if style == 'qmark':
return '?'
elif style == 'numeric':
return ':1'
elif style in ['format', 'pyformat']:
return '%s'
raise UnknownParamstyle(style)
def _db_execute(self, cur, sql_query):
"""executes an sql query"""
self.ctx.dbq_count += 1
try:
a = time.time()
query, params = self._process_query(sql_query)
out = cur.execute(query, params)
b = time.time()
except:
if self.printing:
print('ERR:', str(sql_query), file=debug)
if self.ctx.transactions:
self.ctx.transactions[-1].rollback()
else:
self.ctx.rollback()
raise
if self.printing:
print('%s (%s): %s' % (round(b-a, 2), self.ctx.dbq_count, str(sql_query)), file=debug)
return out
def _process_query(self, sql_query):
"""Takes the SQLQuery object and returns query string and parameters.
"""
paramstyle = getattr(self, 'paramstyle', 'pyformat')
query = sql_query.query(paramstyle)
params = sql_query.values()
return query, params
def _where(self, where, vars):
if isinstance(where, numeric_types):
where = "id = " + sqlparam(where)
#@@@ for backward-compatibility
elif isinstance(where, (list, tuple)) and len(where) == 2:
where = SQLQuery(where[0], where[1])
elif isinstance(where, dict):
where = self._where_dict(where)
elif isinstance(where, SQLQuery):
pass
else:
where = reparam(where, vars)
return where
def _where_dict(self, where):
where_clauses = []
for k, v in sorted(iteritems(where), key= lambda t:t[0]):
where_clauses.append(k + ' = ' + sqlquote(v))
if where_clauses:
return SQLQuery.join(where_clauses, " AND ")
else:
return None
def query(self, sql_query, vars=None, processed=False, _test=False):
"""
Execute SQL query `sql_query` using dictionary `vars` to interpolate it.
If `processed=True`, `vars` is a `reparam`-style list to use
instead of interpolating.
>>> db = DB(None, {})
>>> db.query("SELECT * FROM foo", _test=True)
>>> db.query("SELECT * FROM foo WHERE x = $x", vars=dict(x='f'), _test=True)
>>> db.query("SELECT * FROM foo WHERE x = " + sqlquote('f'), _test=True)
"""
if vars is None: vars = {}
if not processed and not isinstance(sql_query, SQLQuery):
sql_query = reparam(sql_query, vars)
if _test: return sql_query
db_cursor = self._db_cursor()
self._db_execute(db_cursor, sql_query)
if db_cursor.description:
names = [x[0] for x in db_cursor.description]
def iterwrapper():
row = db_cursor.fetchone()
while row:
yield storage(dict(zip(names, row)))
row = db_cursor.fetchone()
out = iterbetter(iterwrapper())
out.__len__ = lambda: int(db_cursor.rowcount)
out.list = lambda: [storage(dict(zip(names, x))) \
for x in db_cursor.fetchall()]
else:
out = db_cursor.rowcount
if not self.ctx.transactions:
self.ctx.commit()
return out
def select(self, tables, vars=None, what='*', where=None, order=None, group=None,
limit=None, offset=None, _test=False):
"""
Selects `what` from `tables` with clauses `where`, `order`,
`group`, `limit`, and `offset`. Uses vars to interpolate.
Otherwise, each clause can be a SQLQuery.
>>> db = DB(None, {})
>>> db.select('foo', _test=True)
<sql: 'SELECT * FROM foo'>
>>> db.select(['foo', 'bar'], where="foo.bar_id = bar.id", limit=5, _test=True)
<sql: 'SELECT * FROM foo, bar WHERE foo.bar_id = bar.id LIMIT 5'>
>>> db.select('foo', where={'id': 5}, _test=True)
<sql: 'SELECT * FROM foo WHERE id = 5'>
"""
if vars is None: vars = {}
sql_clauses = self.sql_clauses(what, tables, where, group, order, limit, offset)
clauses = [self.gen_clause(sql, val, vars) for sql, val in sql_clauses if val is not None]
qout = SQLQuery.join(clauses)
if _test: return qout
return self.query(qout, processed=True)
def where(self, table, what='*', order=None, group=None, limit=None,
offset=None, _test=False, **kwargs):
"""
Selects from `table` where keys are equal to values in `kwargs`.
>>> db = DB(None, {})
>>> db.where('foo', bar_id=3, _test=True)
<sql: 'SELECT * FROM foo WHERE bar_id = 3'>
>>> db.where('foo', source=2, crust='dewey', _test=True)
<sql: "SELECT * FROM foo WHERE crust = 'dewey' AND source = 2">
>>> db.where('foo', _test=True)
<sql: 'SELECT * FROM foo'>
"""
where = self._where_dict(kwargs)
return self.select(table, what=what, order=order,
group=group, limit=limit, offset=offset, _test=_test,
where=where)
def sql_clauses(self, what, tables, where, group, order, limit, offset):
return (
('SELECT', what),
('FROM', sqllist(tables)),
('WHERE', where),
('GROUP BY', group),
('ORDER BY', order),
('LIMIT', limit),
('OFFSET', offset))
def gen_clause(self, sql, val, vars):
if isinstance(val, numeric_types):
if sql == 'WHERE':
nout = 'id = ' + sqlquote(val)
else:
nout = SQLQuery(val)
#@@@
elif isinstance(val, (list, tuple)) and len(val) == 2:
nout = SQLQuery(val[0], val[1]) # backwards-compatibility
elif sql == 'WHERE' and isinstance(val, dict):
nout = self._where_dict(val)
elif isinstance(val, SQLQuery):
nout = val
else:
nout = reparam(val, vars)
def xjoin(a, b):
if a and b: return a + ' ' + b
else: return a or b
return xjoin(sql, nout)
def insert(self, tablename, seqname=None, _test=False, **values):
"""
Inserts `values` into `tablename`. Returns current sequence ID.
Set `seqname` to the ID if it's not the default, or to `False`
if there isn't one.
>>> db = DB(None, {})
>>> q = db.insert('foo', name='bob', age=2, created=SQLLiteral('NOW()'), _test=True)
>>> q
<sql: "INSERT INTO foo (age, created, name) VALUES (2, NOW(), 'bob')">
>>> q.query()
'INSERT INTO foo (age, created, name) VALUES (%s, NOW(), %s)'
>>> q.values()
[2, 'bob']
"""
def q(x): return "(" + x + ")"
if values:
#needed for Py3 compatibility with the above doctests
sorted_values = sorted(values.items(), key=lambda t: t[0])
_keys = SQLQuery.join(map(lambda t: t[0], sorted_values), ', ')
_values = SQLQuery.join([sqlparam(v) for v in map(lambda t: t[1], sorted_values)], ', ')
sql_query = "INSERT INTO %s " % tablename + q(_keys) + ' VALUES ' + q(_values)
else:
sql_query = SQLQuery(self._get_insert_default_values_query(tablename))
if _test: return sql_query
db_cursor = self._db_cursor()
if seqname is not False:
sql_query = self._process_insert_query(sql_query, tablename, seqname)
if isinstance(sql_query, tuple):
# for some databases, a separate query has to be made to find
# the id of the inserted row.
q1, q2 = sql_query
self._db_execute(db_cursor, q1)
self._db_execute(db_cursor, q2)
else:
self._db_execute(db_cursor, sql_query)
try:
out = db_cursor.fetchone()[0]
except Exception:
out = None
if not self.ctx.transactions:
self.ctx.commit()
return out
def _get_insert_default_values_query(self, table):
return "INSERT INTO %s DEFAULT VALUES" % table
def multiple_insert(self, tablename, values, seqname=None, _test=False):
"""
Inserts multiple rows into `tablename`. The `values` must be a list of dictionaries,
one for each row to be inserted, each with the same set of keys.
Returns the list of ids of the inserted rows.
Set `seqname` to the ID if it's not the default, or to `False`
if there isn't one.
>>> db = DB(None, {})
>>> db.supports_multiple_insert = True
>>> values = [{"name": "foo", "email": "foo@example.com"}, {"name": "bar", "email": "bar@example.com"}]
>>> db.multiple_insert('person', values=values, _test=True)
<sql: "INSERT INTO person (email, name) VALUES ('foo@example.com', 'foo'), ('bar@example.com', 'bar')">
"""
if not values:
return []
if not self.supports_multiple_insert:
out = [self.insert(tablename, seqname=seqname, _test=_test, **v) for v in values]
if seqname is False:
return None
else:
return out
keys = values[0].keys()
#@@ make sure all keys are valid
for v in values:
if v.keys() != keys:
raise ValueError('Not all rows have the same keys')
keys = sorted(keys) #enforce query order for the above doctest compatibility with Py3
sql_query = SQLQuery('INSERT INTO %s (%s) VALUES ' % (tablename, ', '.join(keys)))
for i, row in enumerate(values):
if i != 0:
sql_query.append(", ")
SQLQuery.join([SQLParam(row[k]) for k in keys], sep=", ", target=sql_query, prefix="(", suffix=")")
if _test: return sql_query
db_cursor = self._db_cursor()
if seqname is not False:
sql_query = self._process_insert_query(sql_query, tablename, seqname)
if isinstance(sql_query, tuple):
# for some databases, a separate query has to be made to find
# the id of the inserted row.
q1, q2 = sql_query
self._db_execute(db_cursor, q1)
self._db_execute(db_cursor, q2)
else:
self._db_execute(db_cursor, sql_query)
try:
out = db_cursor.fetchone()[0]
out = range(out-len(values)+1, out+1)
except Exception:
out = None
if not self.ctx.transactions:
self.ctx.commit()
return out
def update(self, tables, where, vars=None, _test=False, **values):
"""
Update `tables` with clause `where` (interpolated using `vars`)
and setting `values`.
>>> db = DB(None, {})
>>> name = 'Joseph'
>>> q = db.update('foo', where='name = $name', name='bob', age=2,
... created=SQLLiteral('NOW()'), vars=locals(), _test=True)
>>> q
<sql: "UPDATE foo SET age = 2, created = NOW(), name = 'bob' WHERE name = 'Joseph'">
>>> q.query()
'UPDATE foo SET age = %s, created = NOW(), name = %s WHERE name = %s'
>>> q.values()
[2, 'bob', 'Joseph']
"""
if vars is None: vars = {}
where = self._where(where, vars)
values = sorted(values.items(), key=lambda t: t[0])
query = (
"UPDATE " + sqllist(tables) +
" SET " + sqlwhere(values, ', ') +
" WHERE " + where)
if _test: return query
db_cursor = self._db_cursor()
self._db_execute(db_cursor, query)
if not self.ctx.transactions:
self.ctx.commit()
return db_cursor.rowcount
def delete(self, table, where, using=None, vars=None, _test=False):
"""
Deletes from `table` with clauses `where` and `using`.
>>> db = DB(None, {})
>>> name = 'Joe'
>>> db.delete('foo', where='name = $name', vars=locals(), _test=True)
<sql: "DELETE FROM foo WHERE name = 'Joe'">
"""
if vars is None: vars = {}
where = self._where(where, vars)
q = 'DELETE FROM ' + table
if using: q += ' USING ' + sqllist(using)
if where: q += ' WHERE ' + where
if _test: return q
db_cursor = self._db_cursor()
self._db_execute(db_cursor, q)
if not self.ctx.transactions:
self.ctx.commit()
return db_cursor.rowcount
def _process_insert_query(self, query, tablename, seqname):
return query
def transaction(self):
"""Start a transaction."""
return Transaction(self.ctx)
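# A brief sketch of the high-level API defined by DB. Passing _test=True makes
# each method return the SQLQuery it would execute instead of running it, which
# is also how the doctests in this module work; the table and columns are made up.
def _example_db_api():
    db = DB(None, {})
    s = db.select('users', where={'id': 5}, _test=True)
    i = db.insert('users', name='joe', _test=True)
    u = db.update('users', where='id = $id', vars={'id': 5}, name='bob', _test=True)
    d = db.delete('users', where='id = 5', _test=True)
    return s, i, u, d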
class PostgresDB(DB):
"""Postgres driver."""
def __init__(self, **keywords):
if 'pw' in keywords:
keywords['password'] = keywords.pop('pw')
db_module = import_driver(["psycopg2", "psycopg", "pgdb"], preferred=keywords.pop('driver', None))
if db_module.__name__ == "psycopg2":
import psycopg2.extensions
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
if db_module.__name__ == "pgdb" and 'port' in keywords:
keywords["host"] += ":" + str(keywords.pop('port'))
# if db is not provided postgres driver will take it from PGDATABASE environment variable
if 'db' in keywords:
keywords['database'] = keywords.pop('db')
self.dbname = "postgres"
self.paramstyle = db_module.paramstyle
DB.__init__(self, db_module, keywords)
self.supports_multiple_insert = True
self._sequences = None
def _process_insert_query(self, query, tablename, seqname):
if seqname is None:
# when seqname is not provided guess the seqname and make sure it exists
seqname = tablename + "_id_seq"
if seqname not in self._get_all_sequences():
seqname = None
if seqname:
query += "; SELECT currval('%s')" % seqname
return query
def _get_all_sequences(self):
"""Query postgres to find names of all sequences used in this database."""
if self._sequences is None:
q = "SELECT c.relname FROM pg_class c WHERE c.relkind = 'S'"
self._sequences = set([c.relname for c in self.query(q)])
return self._sequences
def _connect(self, keywords):
conn = DB._connect(self, keywords)
try:
conn.set_client_encoding('UTF8')
except AttributeError:
# fallback for pgdb driver
conn.cursor().execute("set client_encoding to 'UTF-8'")
return conn
def _connect_with_pooling(self, keywords):
conn = DB._connect_with_pooling(self, keywords)
conn._con._con.set_client_encoding('UTF8')
return conn
class MySQLDB(DB):
def __init__(self, **keywords):
db = import_driver(["MySQLdb", "pymysql","mysql.connector"], preferred=keywords.pop('driver', None))
if db.__name__ == "MySQLdb":
if 'pw' in keywords:
keywords['passwd'] = keywords['pw']
del keywords['pw']
if db.__name__ == "pymysql":
if 'pw' in keywords:
keywords['password'] = keywords['pw']
del keywords['pw']
if db.__name__ == "mysql.connector":
if 'pw' in keywords:
keywords['password'] = keywords['pw']
del keywords['pw']
if 'charset' not in keywords:
keywords['charset'] = 'utf8'
elif keywords['charset'] is None:
del keywords['charset']
self.paramstyle = db.paramstyle = 'pyformat' # it's both, like psycopg
self.dbname = "mysql"
DB.__init__(self, db, keywords)
self.supports_multiple_insert = True
def _process_insert_query(self, query, tablename, seqname):
return query, SQLQuery('SELECT last_insert_id();')
def _get_insert_default_values_query(self, table):
return "INSERT INTO %s () VALUES()" % table
def import_driver(drivers, preferred=None):
"""Import the first available driver or preferred driver.
"""
if preferred:
drivers = [preferred]
for d in drivers:
try:
return __import__(d, None, None, ['x'])
except ImportError:
pass
raise ImportError("Unable to import " + " or ".join(drivers))
class SqliteDB(DB):
def __init__(self, **keywords):
db = import_driver(["sqlite3", "pysqlite2.dbapi2", "sqlite"], preferred=keywords.pop('driver', None))
if db.__name__ in ["sqlite3", "pysqlite2.dbapi2"]:
db.paramstyle = 'qmark'
# sqlite driver doesn't create datetime objects for timestamp columns unless `detect_types` option is passed.
# It seems to be supported in the sqlite3 and pysqlite2 drivers, not sure about sqlite.
keywords.setdefault('detect_types', db.PARSE_DECLTYPES)
self.paramstyle = db.paramstyle
keywords['database'] = keywords.pop('db')
keywords['pooling'] = False # sqlite doesn't allow connections to be shared between threads
self.dbname = "sqlite"
DB.__init__(self, db, keywords)
def _process_insert_query(self, query, tablename, seqname):
return query, SQLQuery('SELECT last_insert_rowid();')
def query(self, *a, **kw):
out = DB.query(self, *a, **kw)
if isinstance(out, iterbetter):
del out.__len__
return out
class FirebirdDB(DB):
"""Firebird Database.
"""
def __init__(self, **keywords):
try:
import kinterbasdb as db
except Exception:
db = None
pass
if 'pw' in keywords:
keywords['password'] = keywords.pop('pw')
keywords['database'] = keywords.pop('db')
self.paramstyle = db.paramstyle
DB.__init__(self, db, keywords)
def delete(self, table, where=None, using=None, vars=None, _test=False):
# firebird doesn't support using clause
using=None
return DB.delete(self, table, where, using, vars, _test)
def sql_clauses(self, what, tables, where, group, order, limit, offset):
return (
('SELECT', ''),
('FIRST', limit),
('SKIP', offset),
('', what),
('FROM', sqllist(tables)),
('WHERE', where),
('GROUP BY', group),
('ORDER BY', order)
)
class MSSQLDB(DB):
def __init__(self, **keywords):
import pymssql as db
if 'pw' in keywords:
keywords['password'] = keywords.pop('pw')
keywords['database'] = keywords.pop('db')
self.dbname = "mssql"
DB.__init__(self, db, keywords)
def _process_query(self, sql_query):
"""Takes the SQLQuery object and returns query string and parameters.
"""
# MSSQLDB expects params to be a tuple.
# Overwriting the default implementation to convert params to tuple.
paramstyle = getattr(self, 'paramstyle', 'pyformat')
query = sql_query.query(paramstyle)
params = sql_query.values()
return query, tuple(params)
def sql_clauses(self, what, tables, where, group, order, limit, offset):
return (
('SELECT', what),
('TOP', limit),
('FROM', sqllist(tables)),
('WHERE', where),
('GROUP BY', group),
('ORDER BY', order),
('OFFSET', offset))
def _test(self):
"""Test LIMIT.
Fake presence of pymssql module for running tests.
>>> import sys
>>> sys.modules['pymssql'] = sys.modules['sys']
MSSQL has TOP clause instead of LIMIT clause.
>>> db = MSSQLDB(db='test', user='joe', pw='secret')
>>> db.select('foo', limit=4, _test=True)
<sql: 'SELECT * TOP 4 FROM foo'>
"""
pass
class OracleDB(DB):
def __init__(self, **keywords):
import cx_Oracle as db
if 'pw' in keywords:
keywords['password'] = keywords.pop('pw')
#@@ TODO: use db.makedsn if host, port is specified
keywords['dsn'] = keywords.pop('db')
self.dbname = 'oracle'
db.paramstyle = 'numeric'
self.paramstyle = db.paramstyle
# oracle doesn't support pooling
keywords.pop('pooling', None)
DB.__init__(self, db, keywords)
def _process_insert_query(self, query, tablename, seqname):
if seqname is None:
# It is not possible to get seq name from table name in Oracle
return query
else:
return query + "; SELECT %s.currval FROM dual" % seqname
def dburl2dict(url):
"""
Takes a URL to a database and parses it into an equivalent dictionary.
>>> dburl2dict('postgres:///mygreatdb') == {'pw': None, 'dbn': 'postgres', 'db': 'mygreatdb', 'host': None, 'user': None, 'port': None}
True
>>> dburl2dict('postgres://james:day@serverfarm.example.net:5432/mygreatdb') == {'pw': 'day', 'dbn': 'postgres', 'db': 'mygreatdb', 'host': 'serverfarm.example.net', 'user': 'james', 'port': 5432}
True
>>> dburl2dict('postgres://james:day@serverfarm.example.net/mygreatdb') == {'pw': 'day', 'dbn': 'postgres', 'db': 'mygreatdb', 'host': 'serverfarm.example.net', 'user': 'james', 'port': None}
True
>>> dburl2dict('postgres://james:d%40y@serverfarm.example.net/mygreatdb') == {'pw': 'd@y', 'dbn': 'postgres', 'db': 'mygreatdb', 'host': 'serverfarm.example.net', 'user': 'james', 'port': None}
True
>>> dburl2dict('mysql://james:d%40y@serverfarm.example.net/mygreatdb') == {'pw': 'd@y', 'dbn': 'mysql', 'db': 'mygreatdb', 'host': 'serverfarm.example.net', 'user': 'james', 'port': None}
True
"""
parts = urlparse.urlparse(unquote(url))
return {'dbn': parts.scheme,
'user': parts.username,
'pw': parts.password,
'db': parts.path[1:],
'host': parts.hostname,
'port': parts.port}
_databases = {}
def database(dburl=None, **params):
"""Creates appropriate database using params.
Pooling will be enabled if DBUtils module is available.
Pooling can be disabled by passing pooling=False in params.
"""
if not dburl and not params:
dburl = os.environ['DATABASE_URL']
if dburl:
params = dburl2dict(dburl)
dbn = params.pop('dbn')
if dbn in _databases:
return _databases[dbn](**params)
else:
raise UnknownDB(dbn)
def register_database(name, clazz):
"""
Register a database.
>>> class LegacyDB(DB):
... def __init__(self, **params):
... pass
...
>>> register_database('legacy', LegacyDB)
>>> db = database(dbn='legacy', db='test', user='joe', passwd='secret')
"""
_databases[name] = clazz
register_database('mysql', MySQLDB)
register_database('postgres', PostgresDB)
register_database('sqlite', SqliteDB)
register_database('firebird', FirebirdDB)
register_database('mssql', MSSQLDB)
register_database('oracle', OracleDB)
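# A small sketch of how the factory above is used: database() looks up the
# registered class by `dbn` (or by the scheme of a database URL) and passes the
# remaining keywords to its constructor. The file name and credentials shown
# are placeholders.
def _example_database_factory():
    db = database(dbn='sqlite', db='test.db')
    # URL form, parsed by dburl2dict():
    #   db = database('postgres://user:secret@localhost:5432/mydb')
    return db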
def _interpolate(format):
"""
Takes a format string and returns a list of 2-tuples of the form
(boolean, string) where boolean says whether string should be evaled
or not.
adapted from public-domain code by Ka-Ping Yee
"""
def matchorfail(text, pos):
match = tokenprog.match(text, pos)
if match is None:
raise _ItplError(text, pos)
return match, match.end()
namechars = "abcdefghijklmnopqrstuvwxyz" \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_";
chunks = []
pos = 0
while 1:
dollar = format.find("$", pos)
if dollar < 0:
break
nextchar = format[dollar + 1]
if nextchar == "{":
chunks.append((0, format[pos:dollar]))
pos, level = dollar + 2, 1
while level:
match, pos = matchorfail(format, pos)
tstart, tend = match.regs[3]
token = format[tstart:tend]
if token == "{":
level = level + 1
elif token == "}":
level = level - 1
chunks.append((1, format[dollar + 2:pos - 1]))
elif nextchar in namechars:
chunks.append((0, format[pos:dollar]))
match, pos = matchorfail(format, dollar + 1)
while pos < len(format):
if format[pos] == "." and \
pos + 1 < len(format) and format[pos + 1] in namechars:
match, pos = matchorfail(format, pos + 1)
elif format[pos] in "([":
pos, level = pos + 1, 1
while level:
match, pos = matchorfail(format, pos)
tstart, tend = match.regs[3]
token = format[tstart:tend]
if token[0] in "([":
level = level + 1
elif token[0] in ")]":
level = level - 1
else:
break
chunks.append((1, format[dollar + 1:pos]))
else:
chunks.append((0, format[pos:dollar + 1]))
pos = dollar + 1 + (nextchar == "$")
if pos < len(format):
chunks.append((0, format[pos:]))
return chunks
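# A small illustration of what _interpolate() produces: a list of (live, chunk)
# pairs, where live chunks are later eval'd against `vars` by reparam().
def _example_interpolate():
    chunks = _interpolate("WHERE name = $name AND age > ${age + 1}")
    assert chunks == [(0, 'WHERE name = '), (1, 'name'),
                      (0, ' AND age > '), (1, 'age + 1')]
    return chunks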
if __name__ == "__main__":
import doctest
doctest.testmod()
webpy/web/wsgiserver/ 0000755 0001750 0001750 00000000000 13146625266 013330 5 ustar wmb wmb webpy/web/wsgiserver/wsgiserver2.py 0000644 0001750 0001750 00000257077 13146625266 016206 0 ustar wmb wmb """A high-speed, production ready, thread pooled, generic HTTP server.
Simplest example on how to use this module directly
(without using CherryPy's application machinery)::
from cherrypy import wsgiserver
def my_crazy_app(environ, start_response):
status = '200 OK'
response_headers = [('Content-type','text/plain')]
start_response(status, response_headers)
return ['Hello world!']
server = wsgiserver.CherryPyWSGIServer(
('0.0.0.0', 8070), my_crazy_app,
server_name='www.cherrypy.example')
server.start()
The CherryPy WSGI server can serve as many WSGI applications
as you want in one instance by using a WSGIPathInfoDispatcher::
d = WSGIPathInfoDispatcher({'/': my_crazy_app, '/blog': my_blog_app})
server = wsgiserver.CherryPyWSGIServer(('0.0.0.0', 80), d)
Want SSL support? Just set server.ssl_adapter to an SSLAdapter instance.
This won't call the CherryPy engine (application side) at all, only the
HTTP server, which is independent from the rest of CherryPy. Don't
let the name "CherryPyWSGIServer" throw you; the name merely reflects
its origin, not its coupling.
For those of you wanting to understand internals of this module, here's the
basic call flow. The server's listening thread runs a very tight loop,
sticking incoming connections onto a Queue::
server = CherryPyWSGIServer(...)
server.start()
while True:
tick()
# This blocks until a request comes in:
child = socket.accept()
conn = HTTPConnection(child, ...)
server.requests.put(conn)
Worker threads are kept in a pool and poll the Queue, popping off and then
handling each connection in turn. Each connection can consist of an arbitrary
number of requests and their responses, so we run a nested loop::
while True:
conn = server.requests.get()
conn.communicate()
-> while True:
req = HTTPRequest(...)
req.parse_request()
-> # Read the Request-Line, e.g. "GET /page HTTP/1.1"
req.rfile.readline()
read_headers(req.rfile, req.inheaders)
req.respond()
-> response = app(...)
try:
for chunk in response:
if chunk:
req.write(chunk)
finally:
if hasattr(response, "close"):
response.close()
if req.close_connection:
return
"""
__all__ = ['HTTPRequest', 'HTTPConnection', 'HTTPServer',
'SizeCheckWrapper', 'KnownLengthRFile', 'ChunkedRFile',
'CP_fileobject',
'MaxSizeExceeded', 'NoSSLError', 'FatalSSLAlert',
'WorkerThread', 'ThreadPool', 'SSLAdapter',
'CherryPyWSGIServer',
'Gateway', 'WSGIGateway', 'WSGIGateway_10', 'WSGIGateway_u0',
'WSGIPathInfoDispatcher', 'get_ssl_adapter_class',
'socket_errors_to_ignore']
import os
try:
import queue
except:
import Queue as queue
import re
import email.utils
import socket
import sys
import threading
import time
import traceback as traceback_
import operator
from urllib import unquote
from urlparse import urlparse
import warnings
import errno
import logging
try:
# prefer slower Python-based io module
import _pyio as io
except ImportError:
# Python 2.6
import io
try:
import pkg_resources
except ImportError:
pass
if 'win' in sys.platform and hasattr(socket, "AF_INET6"):
if not hasattr(socket, 'IPPROTO_IPV6'):
socket.IPPROTO_IPV6 = 41
if not hasattr(socket, 'IPV6_V6ONLY'):
socket.IPV6_V6ONLY = 27
DEFAULT_BUFFER_SIZE = io.DEFAULT_BUFFER_SIZE
try:
cp_version = pkg_resources.require('cherrypy')[0].version
except Exception:
cp_version = 'unknown'
class FauxSocket(object):
"""Faux socket with the minimal interface required by pypy"""
def _reuse(self):
pass
_fileobject_uses_str_type = isinstance(
socket._fileobject(FauxSocket())._rbuf, basestring)
del FauxSocket # this class is no longer required for anything.
if sys.version_info >= (3, 0):
unicodestr = str
basestring = (bytes, str)
def ntob(n, encoding='ISO-8859-1'):
"""Return the given native string as a byte string in the given
encoding.
"""
# In Python 3, the native string type is unicode
return n.encode(encoding)
else:
unicodestr = unicode
basestring = basestring
def ntob(n, encoding='ISO-8859-1'):
"""Return the given native string as a byte string in the given
encoding.
"""
# In Python 2, the native string type is bytes. Assume it's already
# in the given encoding, which for ISO-8859-1 is almost always what
# was intended.
return n
LF = ntob('\n')
CRLF = ntob('\r\n')
TAB = ntob('\t')
SPACE = ntob(' ')
COLON = ntob(':')
SEMICOLON = ntob(';')
EMPTY = ntob('')
NUMBER_SIGN = ntob('#')
QUESTION_MARK = ntob('?')
ASTERISK = ntob('*')
FORWARD_SLASH = ntob('/')
quoted_slash = re.compile(ntob("(?i)%2F"))
def plat_specific_errors(*errnames):
"""Return error numbers for all errors in errnames on this platform.
The 'errno' module contains different global constants depending on
the specific platform (OS). This function will return the list of
numeric values for a given list of potential names.
"""
errno_names = dir(errno)
nums = [getattr(errno, k) for k in errnames if k in errno_names]
# de-dupe the list
return list(dict.fromkeys(nums).keys())
socket_error_eintr = plat_specific_errors("EINTR", "WSAEINTR")
socket_errors_to_ignore = plat_specific_errors(
"EPIPE",
"EBADF", "WSAEBADF",
"ENOTSOCK", "WSAENOTSOCK",
"ETIMEDOUT", "WSAETIMEDOUT",
"ECONNREFUSED", "WSAECONNREFUSED",
"ECONNRESET", "WSAECONNRESET",
"ECONNABORTED", "WSAECONNABORTED",
"ENETRESET", "WSAENETRESET",
"EHOSTDOWN", "EHOSTUNREACH",
)
socket_errors_to_ignore.append("timed out")
socket_errors_to_ignore.append("The read operation timed out")
if sys.platform == 'darwin':
socket_errors_to_ignore.append(plat_specific_errors("EPROTOTYPE"))
socket_errors_nonblocking = plat_specific_errors(
'EAGAIN', 'EWOULDBLOCK', 'WSAEWOULDBLOCK')
comma_separated_headers = [
ntob(h) for h in
['Accept', 'Accept-Charset', 'Accept-Encoding',
'Accept-Language', 'Accept-Ranges', 'Allow', 'Cache-Control',
'Connection', 'Content-Encoding', 'Content-Language', 'Expect',
'If-Match', 'If-None-Match', 'Pragma', 'Proxy-Authenticate', 'TE',
'Trailer', 'Transfer-Encoding', 'Upgrade', 'Vary', 'Via', 'Warning',
'WWW-Authenticate']
]
if not hasattr(logging, 'statistics'):
logging.statistics = {}
def read_headers(rfile, hdict=None):
"""Read headers from the given stream into the given header dict.
If hdict is None, a new header dict is created. Returns the populated
header dict.
Headers which are repeated are folded together using a comma if their
specification so dictates.
This function raises ValueError when the read bytes violate the HTTP spec.
You should probably return "400 Bad Request" if this happens.
"""
if hdict is None:
hdict = {}
while True:
line = rfile.readline()
if not line:
# No more data--illegal end of headers
raise ValueError("Illegal end of headers.")
if line == CRLF:
# Normal end of headers
break
if not line.endswith(CRLF):
raise ValueError("HTTP requires CRLF terminators")
if line[0] in (SPACE, TAB):
# It's a continuation line.
v = line.strip()
else:
try:
k, v = line.split(COLON, 1)
except ValueError:
raise ValueError("Illegal header line.")
# TODO: what about TE and WWW-Authenticate?
k = k.strip().title()
v = v.strip()
hname = k
if k in comma_separated_headers:
existing = hdict.get(hname)
if existing:
v = ", ".join((existing, v))
hdict[hname] = v
return hdict
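# A quick sketch of read_headers() on a canned request, assuming the Python 2
# environment this module targets (where bytes and native strings coincide).
def _example_read_headers():
    import io
    raw = io.BytesIO(b"Host: www.example.com\r\n"
                     b"Accept: text/html\r\n"
                     b"Accept: text/plain\r\n"
                     b"\r\n")
    headers = read_headers(raw)
    # 'Accept' is listed in comma_separated_headers, so the repeats fold into
    # {'Host': 'www.example.com', 'Accept': 'text/html, text/plain'}
    return headers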
class MaxSizeExceeded(Exception):
pass
class SizeCheckWrapper(object):
"""Wraps a file-like object, raising MaxSizeExceeded if too large."""
def __init__(self, rfile, maxlen):
self.rfile = rfile
self.maxlen = maxlen
self.bytes_read = 0
def _check_length(self):
if self.maxlen and self.bytes_read > self.maxlen:
raise MaxSizeExceeded()
def read(self, size=None):
data = self.rfile.read(size)
self.bytes_read += len(data)
self._check_length()
return data
def readline(self, size=None):
if size is not None:
data = self.rfile.readline(size)
self.bytes_read += len(data)
self._check_length()
return data
# User didn't specify a size ...
# We read the line in chunks to make sure it's not a 100MB line !
res = []
while True:
data = self.rfile.readline(256)
self.bytes_read += len(data)
self._check_length()
res.append(data)
# See https://github.com/cherrypy/cherrypy/issues/421
if len(data) < 256 or data[-1:] == LF:
return EMPTY.join(res)
def readlines(self, sizehint=0):
# Shamelessly stolen from StringIO
total = 0
lines = []
line = self.readline()
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline()
return lines
def close(self):
self.rfile.close()
def __iter__(self):
return self
def __next__(self):
data = next(self.rfile)
self.bytes_read += len(data)
self._check_length()
return data
def next(self):
data = self.rfile.next()
self.bytes_read += len(data)
self._check_length()
return data
class KnownLengthRFile(object):
"""Wraps a file-like object, returning an empty string when exhausted."""
def __init__(self, rfile, content_length):
self.rfile = rfile
self.remaining = content_length
def read(self, size=None):
if self.remaining == 0:
return ''
if size is None:
size = self.remaining
else:
size = min(size, self.remaining)
data = self.rfile.read(size)
self.remaining -= len(data)
return data
def readline(self, size=None):
if self.remaining == 0:
return ''
if size is None:
size = self.remaining
else:
size = min(size, self.remaining)
data = self.rfile.readline(size)
self.remaining -= len(data)
return data
def readlines(self, sizehint=0):
# Shamelessly stolen from StringIO
total = 0
lines = []
line = self.readline(sizehint)
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline(sizehint)
return lines
def close(self):
self.rfile.close()
def __iter__(self):
return self
def __next__(self):
data = next(self.rfile)
self.remaining -= len(data)
return data
class ChunkedRFile(object):
"""Wraps a file-like object, returning an empty string when exhausted.
This class is intended to provide a conforming wsgi.input value for
request entities that have been encoded with the 'chunked' transfer
encoding.
"""
def __init__(self, rfile, maxlen, bufsize=8192):
self.rfile = rfile
self.maxlen = maxlen
self.bytes_read = 0
self.buffer = EMPTY
self.bufsize = bufsize
self.closed = False
def _fetch(self):
if self.closed:
return
line = self.rfile.readline()
self.bytes_read += len(line)
if self.maxlen and self.bytes_read > self.maxlen:
raise MaxSizeExceeded("Request Entity Too Large", self.maxlen)
line = line.strip().split(SEMICOLON, 1)
try:
chunk_size = line.pop(0)
chunk_size = int(chunk_size, 16)
except ValueError:
raise ValueError("Bad chunked transfer size: " + repr(chunk_size))
if chunk_size <= 0:
self.closed = True
return
## if line: chunk_extension = line[0]
if self.maxlen and self.bytes_read + chunk_size > self.maxlen:
raise IOError("Request Entity Too Large")
chunk = self.rfile.read(chunk_size)
self.bytes_read += len(chunk)
self.buffer += chunk
crlf = self.rfile.read(2)
if crlf != CRLF:
raise ValueError(
"Bad chunked transfer coding (expected '\\r\\n', "
"got " + repr(crlf) + ")")
def read(self, size=None):
data = EMPTY
while True:
if size and len(data) >= size:
return data
if not self.buffer:
self._fetch()
if not self.buffer:
# EOF
return data
if size:
remaining = size - len(data)
data += self.buffer[:remaining]
self.buffer = self.buffer[remaining:]
else:
# consume the whole buffer before fetching the next chunk
data += self.buffer
self.buffer = EMPTY
def readline(self, size=None):
data = EMPTY
while True:
if size and len(data) >= size:
return data
if not self.buffer:
self._fetch()
if not self.buffer:
# EOF
return data
newline_pos = self.buffer.find(LF)
if size:
if newline_pos == -1:
remaining = size - len(data)
data += self.buffer[:remaining]
self.buffer = self.buffer[remaining:]
else:
remaining = min(size - len(data), newline_pos)
data += self.buffer[:remaining]
self.buffer = self.buffer[remaining:]
else:
if newline_pos == -1:
data += self.buffer
self.buffer = EMPTY
else:
# consume through the newline and return the completed line
data += self.buffer[:newline_pos + 1]
self.buffer = self.buffer[newline_pos + 1:]
return data
def readlines(self, sizehint=0):
# Shamelessly stolen from StringIO
total = 0
lines = []
line = self.readline(sizehint)
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline(sizehint)
return lines
def read_trailer_lines(self):
if not self.closed:
raise ValueError(
"Cannot read trailers until the request body has been read.")
while True:
line = self.rfile.readline()
if not line:
# No more data--illegal end of headers
raise ValueError("Illegal end of headers.")
self.bytes_read += len(line)
if self.maxlen and self.bytes_read > self.maxlen:
raise IOError("Request Entity Too Large")
if line == CRLF:
# Normal end of headers
break
if not line.endswith(CRLF):
raise ValueError("HTTP requires CRLF terminators")
yield line
def close(self):
self.rfile.close()
def __iter__(self):
line = self.readline()
while line:
yield line
line = self.readline()
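# A small sketch of ChunkedRFile decoding a chunked request body from an
# in-memory stream, again assuming the Python 2 semantics used by this module.
def _example_chunked_rfile():
    import io
    body = io.BytesIO(b"5\r\nhello\r\n6\r\n world\r\n0\r\n\r\n")
    rfile = ChunkedRFile(body, maxlen=0)
    first = rfile.read(5)                         # -> 'hello'
    rest = rfile.read(64)                         # -> ' world' (end of body)
    trailers = list(rfile.read_trailer_lines())   # no trailer headers here
    return first, rest, trailers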
class HTTPRequest(object):
"""An HTTP Request (and response).
A single HTTP connection may consist of multiple request/response pairs.
"""
server = None
"""The HTTPServer object which is receiving this request."""
conn = None
"""The HTTPConnection object on which this request connected."""
inheaders = {}
"""A dict of request headers."""
outheaders = []
"""A list of header tuples to write in the response."""
ready = False
"""When True, the request has been parsed and is ready to begin generating
the response. When False, signals the calling Connection that the response
should not be generated and the connection should close."""
close_connection = False
"""Signals the calling Connection that the request should close. This does
not imply an error! The client and/or server may each request that the
connection be closed."""
chunked_write = False
"""If True, output will be encoded with the "chunked" transfer-coding.
This value is set automatically inside send_headers."""
def __init__(self, server, conn):
self.server = server
self.conn = conn
self.ready = False
self.started_request = False
self.scheme = ntob("http")
if self.server.ssl_adapter is not None:
self.scheme = ntob("https")
# Use the lowest-common protocol in case read_request_line errors.
self.response_protocol = 'HTTP/1.0'
self.inheaders = {}
self.status = ""
self.outheaders = []
self.sent_headers = False
self.close_connection = self.__class__.close_connection
self.chunked_read = False
self.chunked_write = self.__class__.chunked_write
def parse_request(self):
"""Parse the next HTTP request start-line and message-headers."""
self.rfile = SizeCheckWrapper(self.conn.rfile,
self.server.max_request_header_size)
try:
success = self.read_request_line()
except MaxSizeExceeded:
self.simple_response(
"414 Request-URI Too Long",
"The Request-URI sent with the request exceeds the maximum "
"allowed bytes.")
return
else:
if not success:
return
try:
success = self.read_request_headers()
except MaxSizeExceeded:
self.simple_response(
"413 Request Entity Too Large",
"The headers sent with the request exceed the maximum "
"allowed bytes.")
return
else:
if not success:
return
self.ready = True
def read_request_line(self):
# HTTP/1.1 connections are persistent by default. If a client
# requests a page, then idles (leaves the connection open),
# then rfile.readline() will raise socket.error("timed out").
# Note that it does this based on the value given to settimeout(),
# and doesn't need the client to request or acknowledge the close
# (although your TCP stack might suffer for it: cf Apache's history
# with FIN_WAIT_2).
request_line = self.rfile.readline()
# Set started_request to True so communicate() knows to send 408
# from here on out.
self.started_request = True
if not request_line:
return False
if request_line == CRLF:
# RFC 2616 sec 4.1: "...if the server is reading the protocol
# stream at the beginning of a message and receives a CRLF
# first, it should ignore the CRLF."
# But only ignore one leading line! else we enable a DoS.
request_line = self.rfile.readline()
if not request_line:
return False
if not request_line.endswith(CRLF):
self.simple_response(
"400 Bad Request", "HTTP requires CRLF terminators")
return False
try:
method, uri, req_protocol = request_line.strip().split(SPACE, 2)
rp = int(req_protocol[5]), int(req_protocol[7])
except (ValueError, IndexError):
self.simple_response("400 Bad Request", "Malformed Request-Line")
return False
self.uri = uri
self.method = method
# uri may be an abs_path (including "http://host.domain.tld");
scheme, authority, path = self.parse_request_uri(uri)
if path is None:
self.simple_response("400 Bad Request",
"Invalid path in Request-URI.")
return False
if NUMBER_SIGN in path:
self.simple_response("400 Bad Request",
"Illegal #fragment in Request-URI.")
return False
if scheme:
self.scheme = scheme
qs = EMPTY
if QUESTION_MARK in path:
path, qs = path.split(QUESTION_MARK, 1)
# Unquote the path+params (e.g. "/this%20path" -> "/this path").
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
#
# But note that "...a URI must be separated into its components
# before the escaped characters within those components can be
# safely decoded." http://www.ietf.org/rfc/rfc2396.txt, sec 2.4.2
# Therefore, "/this%2Fpath" becomes "/this%2Fpath", not "/this/path".
try:
atoms = [unquote(x) for x in quoted_slash.split(path)]
except ValueError:
ex = sys.exc_info()[1]
self.simple_response("400 Bad Request", ex.args[0])
return False
path = "%2F".join(atoms)
self.path = path
# Note that, like wsgiref and most other HTTP servers,
# we "% HEX HEX"-unquote the path but not the query string.
self.qs = qs
# Compare request and server HTTP protocol versions, in case our
# server does not support the requested protocol. Limit our output
# to min(req, server). We want the following output:
#     request    server     actual written   supported response
#     protocol   protocol  response protocol    feature set
# a     1.0        1.0           1.0                1.0
# b     1.0        1.1           1.1                1.0
# c     1.1        1.0           1.0                1.0
# d     1.1        1.1           1.1                1.1
# Notice that, in (b), the response will be "HTTP/1.1" even though
# the client only understands 1.0. RFC 2616 10.5.6 says we should
# only return 505 if the _major_ version is different.
sp = int(self.server.protocol[5]), int(self.server.protocol[7])
if sp[0] != rp[0]:
self.simple_response("505 HTTP Version Not Supported")
return False
self.request_protocol = req_protocol
self.response_protocol = "HTTP/%s.%s" % min(rp, sp)
return True
def read_request_headers(self):
"""Read self.rfile into self.inheaders. Return success."""
# then all the http headers
try:
read_headers(self.rfile, self.inheaders)
except ValueError:
ex = sys.exc_info()[1]
self.simple_response("400 Bad Request", ex.args[0])
return False
mrbs = self.server.max_request_body_size
if mrbs and int(self.inheaders.get("Content-Length", 0)) > mrbs:
self.simple_response(
"413 Request Entity Too Large",
"The entity sent with the request exceeds the maximum "
"allowed bytes.")
return False
# Persistent connection support
if self.response_protocol == "HTTP/1.1":
# Both server and client are HTTP/1.1
if self.inheaders.get("Connection", "") == "close":
self.close_connection = True
else:
# Either the server or client (or both) are HTTP/1.0
if self.inheaders.get("Connection", "") != "Keep-Alive":
self.close_connection = True
# Transfer-Encoding support
te = None
if self.response_protocol == "HTTP/1.1":
te = self.inheaders.get("Transfer-Encoding")
if te:
te = [x.strip().lower() for x in te.split(",") if x.strip()]
self.chunked_read = False
if te:
for enc in te:
if enc == "chunked":
self.chunked_read = True
else:
# Note that, even if we see "chunked", we must reject
# if there is an extension we don't recognize.
self.simple_response("501 Unimplemented")
self.close_connection = True
return False
# From PEP 333:
# "Servers and gateways that implement HTTP 1.1 must provide
# transparent support for HTTP 1.1's "expect/continue" mechanism.
# This may be done in any of several ways:
# 1. Respond to requests containing an Expect: 100-continue request
# with an immediate "100 Continue" response, and proceed normally.
# 2. Proceed with the request normally, but provide the application
# with a wsgi.input stream that will send the "100 Continue"
# response if/when the application first attempts to read from
# the input stream. The read request must then remain blocked
# until the client responds.
# 3. Wait until the client decides that the server does not support
# expect/continue, and sends the request body on its own.
# (This is suboptimal, and is not recommended.)
#
# We used to do 3, but are now doing 1. Maybe we'll do 2 someday,
# but it seems like it would be a big slowdown for such a rare case.
if self.inheaders.get("Expect", "") == "100-continue":
# Don't use simple_response here, because it emits headers
# we don't want. See
# https://github.com/cherrypy/cherrypy/issues/951
msg = self.server.protocol + " 100 Continue\r\n\r\n"
try:
self.conn.wfile.sendall(msg)
except socket.error:
x = sys.exc_info()[1]
if x.args[0] not in socket_errors_to_ignore:
raise
return True
def parse_request_uri(self, uri):
"""Parse a Request-URI into (scheme, authority, path).
Note that Request-URI's must be one of::
Request-URI = "*" | absoluteURI | abs_path | authority
Therefore, a Request-URI which starts with a double forward-slash
cannot be a "net_path"::
net_path = "//" authority [ abs_path ]
Instead, it must be interpreted as an "abs_path" with an empty first
path segment::
abs_path = "/" path_segments
path_segments = segment *( "/" segment )
segment = *pchar *( ";" param )
param = *pchar
"""
if uri == ASTERISK:
return None, None, uri
scheme, authority, path, params, query, fragment = urlparse(uri)
if scheme and QUESTION_MARK not in scheme:
# An absoluteURI.
# If there's a scheme (and it must be http or https), then:
# http_URL = "http:" "//" host [ ":" port ] [ abs_path [ "?" query
# ]]
return scheme, authority, path
if uri.startswith(FORWARD_SLASH):
# An abs_path.
return None, None, uri
else:
# An authority.
return None, uri, None
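# --- Illustrative reference (not part of the original source): the
# (scheme, authority, path) triples parse_request_uri() produces for the
# Request-URI shapes listed in its docstring. Hostnames are placeholders,
# and the values assume Python 2's urlparse, which this module uses.
_REQUEST_URI_EXAMPLES = {
    "*": (None, None, "*"),                                     # e.g. OPTIONS *
    "http://host.example/a?b=1": ("http", "host.example", "/a"),
    "/a?b=1": (None, None, "/a?b=1"),                           # abs_path
    "host.example:443": (None, "host.example:443", None),       # authority (CONNECT)
}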
def respond(self):
"""Call the gateway and write its iterable output."""
mrbs = self.server.max_request_body_size
if self.chunked_read:
self.rfile = ChunkedRFile(self.conn.rfile, mrbs)
else:
cl = int(self.inheaders.get("Content-Length", 0))
if mrbs and mrbs < cl:
if not self.sent_headers:
self.simple_response(
"413 Request Entity Too Large",
"The entity sent with the request exceeds the maximum "
"allowed bytes.")
return
self.rfile = KnownLengthRFile(self.conn.rfile, cl)
self.server.gateway(self).respond()
if (self.ready and not self.sent_headers):
self.sent_headers = True
self.send_headers()
if self.chunked_write:
self.conn.wfile.sendall("0\r\n\r\n")
def simple_response(self, status, msg=""):
"""Write a simple response back to the client."""
status = str(status)
buf = [self.server.protocol + SPACE +
status + CRLF,
"Content-Length: %s\r\n" % len(msg),
"Content-Type: text/plain\r\n"]
if status[:3] in ("413", "414"):
# Request Entity Too Large / Request-URI Too Long
self.close_connection = True
if self.response_protocol == 'HTTP/1.1':
# This will not be true for 414, since read_request_line
# usually raises 414 before reading the whole line, and we
# therefore cannot know the proper response_protocol.
buf.append("Connection: close\r\n")
else:
# HTTP/1.0 had no 413/414 status nor Connection header.
# Emit 400 instead and trust the message body is enough.
status = "400 Bad Request"
buf.append(CRLF)
if msg:
if isinstance(msg, unicodestr):
msg = msg.encode("ISO-8859-1")
buf.append(msg)
try:
self.conn.wfile.sendall("".join(buf))
except socket.error:
x = sys.exc_info()[1]
if x.args[0] not in socket_errors_to_ignore:
raise
def write(self, chunk):
"""Write unbuffered data to the client."""
if self.chunked_write and chunk:
buf = [hex(len(chunk))[2:], CRLF, chunk, CRLF]
self.conn.wfile.sendall(EMPTY.join(buf))
else:
self.conn.wfile.sendall(chunk)
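# --- Illustrative sketch (not part of the original source): the chunked
# transfer-coding frame produced by write() above for a single chunk --
# the size in hex, CRLF, the data, CRLF. A zero-length chunk ("0\r\n\r\n",
# sent from respond()) terminates the body. The helper name is hypothetical.
def _demo_chunk_frame(chunk):
    return hex(len(chunk))[2:] + "\r\n" + chunk + "\r\n"
# _demo_chunk_frame("Hello") == "5\r\nHello\r\n"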
def send_headers(self):
"""Assert, process, and send the HTTP response message-headers.
You must set self.status, and self.outheaders before calling this.
"""
hkeys = [key.lower() for key, value in self.outheaders]
status = int(self.status[:3])
if status == 413:
# Request Entity Too Large. Close conn to avoid garbage.
self.close_connection = True
elif "content-length" not in hkeys:
# "All 1xx (informational), 204 (no content),
# and 304 (not modified) responses MUST NOT
# include a message-body." So no point chunking.
if status < 200 or status in (204, 205, 304):
pass
else:
if (self.response_protocol == 'HTTP/1.1'
and self.method != 'HEAD'):
# Use the chunked transfer-coding
self.chunked_write = True
self.outheaders.append(("Transfer-Encoding", "chunked"))
else:
# Closing the conn is the only way to determine len.
self.close_connection = True
if "connection" not in hkeys:
if self.response_protocol == 'HTTP/1.1':
# Both server and client are HTTP/1.1 or better
if self.close_connection:
self.outheaders.append(("Connection", "close"))
else:
# Server and/or client are HTTP/1.0
if not self.close_connection:
self.outheaders.append(("Connection", "Keep-Alive"))
if (not self.close_connection) and (not self.chunked_read):
# Read any remaining request body data on the socket.
# "If an origin server receives a request that does not include an
# Expect request-header field with the "100-continue" expectation,
# the request includes a request body, and the server responds
# with a final status code before reading the entire request body
# from the transport connection, then the server SHOULD NOT close
# the transport connection until it has read the entire request,
# or until the client closes the connection. Otherwise, the client
# might not reliably receive the response message. However, this
# requirement is not to be construed as preventing a server from
# defending itself against denial-of-service attacks, or from
# badly broken client implementations."
remaining = getattr(self.rfile, 'remaining', 0)
if remaining > 0:
self.rfile.read(remaining)
if "date" not in hkeys:
self.outheaders.append(("Date", email.utils.formatdate()))
if "server" not in hkeys:
self.outheaders.append(("Server", self.server.server_name))
buf = [self.server.protocol + SPACE + self.status + CRLF]
for k, v in self.outheaders:
buf.append(k + COLON + SPACE + v + CRLF)
buf.append(CRLF)
self.conn.wfile.sendall(EMPTY.join(buf))
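# --- Illustrative sketch (not part of the original source): the default
# Connection header decision send_headers() makes above when the
# application has not set one itself. The helper name is hypothetical.
def _demo_default_connection_header(response_protocol, close_connection):
    if response_protocol == 'HTTP/1.1':
        # HTTP/1.1 is persistent by default; only advertise "close".
        return ('Connection', 'close') if close_connection else None
    # HTTP/1.0 closes by default; keep-alive must be requested explicitly.
    return None if close_connection else ('Connection', 'Keep-Alive')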
class NoSSLError(Exception):
"""Exception raised when a client speaks HTTP to an HTTPS socket."""
pass
class FatalSSLAlert(Exception):
"""Exception raised when the SSL implementation signals a fatal alert."""
pass
class CP_fileobject(socket._fileobject):
"""Faux file object attached to a socket object."""
def __init__(self, *args, **kwargs):
self.bytes_read = 0
self.bytes_written = 0
socket._fileobject.__init__(self, *args, **kwargs)
def sendall(self, data):
"""Sendall for non-blocking sockets."""
while data:
try:
bytes_sent = self.send(data)
data = data[bytes_sent:]
except socket.error, e:
if e.args[0] not in socket_errors_nonblocking:
raise
def send(self, data):
bytes_sent = self._sock.send(data)
self.bytes_written += bytes_sent
return bytes_sent
def flush(self):
if self._wbuf:
buffer = "".join(self._wbuf)
self._wbuf = []
self.sendall(buffer)
def recv(self, size):
while True:
try:
data = self._sock.recv(size)
self.bytes_read += len(data)
return data
except socket.error, e:
if (e.args[0] not in socket_errors_nonblocking
and e.args[0] not in socket_error_eintr):
raise
if not _fileobject_uses_str_type:
def read(self, size=-1):
# Use max, disallow tiny reads in a loop as they are very
# inefficient.
# We never leave read() with any leftover data from a new recv()
# call in our internal buffer.
rbufsize = max(self._rbufsize, self.default_bufsize)
# Our use of StringIO rather than lists of string objects returned
# by recv() minimizes memory usage and fragmentation that occurs
# when rbufsize is large compared to the typical return value of
# recv().
buf = self._rbuf
buf.seek(0, 2) # seek end
if size < 0:
# Read until EOF
# reset _rbuf. we consume it via buf.
self._rbuf = io.BytesIO()
while True:
data = self.recv(rbufsize)
if not data:
break
buf.write(data)
return buf.getvalue()
else:
# Read until size bytes or EOF seen, whichever comes first
buf_len = buf.tell()
if buf_len >= size:
# Already have size bytes in our buffer? Extract and
# return.
buf.seek(0)
rv = buf.read(size)
self._rbuf = io.BytesIO()
self._rbuf.write(buf.read())
return rv
# reset _rbuf. we consume it via buf.
self._rbuf = io.BytesIO()
while True:
left = size - buf_len
# recv() will malloc the amount of memory given as its
# parameter even though it often returns much less data
# than that. The returned data string is short lived
# as we copy it into a StringIO and free it. This avoids
# fragmentation issues on many platforms.
data = self.recv(left)
if not data:
break
n = len(data)
if n == size and not buf_len:
# Shortcut. Avoid buffer data copies when:
# - We have no data in our buffer.
# AND
# - Our call to recv returned exactly the
# number of bytes we were asked to read.
return data
if n == left:
buf.write(data)
del data # explicit free
break
assert n <= left, "recv(%d) returned %d bytes" % (left, n)
buf.write(data)
buf_len += n
del data # explicit free
#assert buf_len == buf.tell()
return buf.getvalue()
def readline(self, size=-1):
buf = self._rbuf
buf.seek(0, 2) # seek end
if buf.tell() > 0:
# check if we already have it in our buffer
buf.seek(0)
bline = buf.readline(size)
if bline.endswith('\n') or len(bline) == size:
self._rbuf = io.BytesIO()
self._rbuf.write(buf.read())
return bline
del bline
if size < 0:
# Read until \n or EOF, whichever comes first
if self._rbufsize <= 1:
# Speed up unbuffered case
buf.seek(0)
buffers = [buf.read()]
# reset _rbuf. we consume it via buf.
self._rbuf = io.BytesIO()
data = None
recv = self.recv
while data != "\n":
data = recv(1)
if not data:
break
buffers.append(data)
return "".join(buffers)
buf.seek(0, 2) # seek end
# reset _rbuf. we consume it via buf.
self._rbuf = io.BytesIO()
while True:
data = self.recv(self._rbufsize)
if not data:
break
nl = data.find('\n')
if nl >= 0:
nl += 1
buf.write(data[:nl])
self._rbuf.write(data[nl:])
del data
break
buf.write(data)
return buf.getvalue()
else:
# Read until size bytes or \n or EOF seen, whichever comes
# first
buf.seek(0, 2) # seek end
buf_len = buf.tell()
if buf_len >= size:
buf.seek(0)
rv = buf.read(size)
self._rbuf = io.BytesIO()
self._rbuf.write(buf.read())
return rv
# reset _rbuf. we consume it via buf.
self._rbuf = io.BytesIO()
while True:
data = self.recv(self._rbufsize)
if not data:
break
left = size - buf_len
# did we just receive a newline?
nl = data.find('\n', 0, left)
if nl >= 0:
nl += 1
# save the excess data to _rbuf
self._rbuf.write(data[nl:])
if buf_len:
buf.write(data[:nl])
break
else:
# Shortcut. Avoid data copy through buf when
# returning a substring of our first recv().
return data[:nl]
n = len(data)
if n == size and not buf_len:
# Shortcut. Avoid data copy through buf when
# returning exactly all of our first recv().
return data
if n >= left:
buf.write(data[:left])
self._rbuf.write(data[left:])
break
buf.write(data)
buf_len += n
#assert buf_len == buf.tell()
return buf.getvalue()
else:
def read(self, size=-1):
if size < 0:
# Read until EOF
buffers = [self._rbuf]
self._rbuf = ""
if self._rbufsize <= 1:
recv_size = self.default_bufsize
else:
recv_size = self._rbufsize
while True:
data = self.recv(recv_size)
if not data:
break
buffers.append(data)
return "".join(buffers)
else:
# Read until size bytes or EOF seen, whichever comes first
data = self._rbuf
buf_len = len(data)
if buf_len >= size:
self._rbuf = data[size:]
return data[:size]
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
left = size - buf_len
recv_size = max(self._rbufsize, left)
data = self.recv(recv_size)
if not data:
break
buffers.append(data)
n = len(data)
if n >= left:
self._rbuf = data[left:]
buffers[-1] = data[:left]
break
buf_len += n
return "".join(buffers)
def readline(self, size=-1):
data = self._rbuf
if size < 0:
# Read until \n or EOF, whichever comes first
if self._rbufsize <= 1:
# Speed up unbuffered case
assert data == ""
buffers = []
while data != "\n":
data = self.recv(1)
if not data:
break
buffers.append(data)
return "".join(buffers)
nl = data.find('\n')
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
return data[:nl]
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
data = self.recv(self._rbufsize)
if not data:
break
buffers.append(data)
nl = data.find('\n')
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
buffers[-1] = data[:nl]
break
return "".join(buffers)
else:
# Read until size bytes or \n or EOF seen, whichever comes
# first
nl = data.find('\n', 0, size)
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
return data[:nl]
buf_len = len(data)
if buf_len >= size:
self._rbuf = data[size:]
return data[:size]
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
data = self.recv(self._rbufsize)
if not data:
break
buffers.append(data)
left = size - buf_len
nl = data.find('\n', 0, left)
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
buffers[-1] = data[:nl]
break
n = len(data)
if n >= left:
self._rbuf = data[left:]
buffers[-1] = data[:left]
break
buf_len += n
return "".join(buffers)
class HTTPConnection(object):
"""An HTTP connection (active socket).
server: the Server object which received this connection.
socket: the raw socket object (usually TCP) for this connection.
makefile: a fileobject class for reading from the socket.
"""
remote_addr = None
remote_port = None
ssl_env = None
rbufsize = DEFAULT_BUFFER_SIZE
wbufsize = DEFAULT_BUFFER_SIZE
RequestHandlerClass = HTTPRequest
def __init__(self, server, sock, makefile=CP_fileobject):
self.server = server
self.socket = sock
self.rfile = makefile(sock, "rb", self.rbufsize)
self.wfile = makefile(sock, "wb", self.wbufsize)
self.requests_seen = 0
def communicate(self):
"""Read each request and respond appropriately."""
request_seen = False
try:
while True:
# (re)set req to None so that if something goes wrong in
# the RequestHandlerClass constructor, the error doesn't
# get written to the previous request.
req = None
req = self.RequestHandlerClass(self.server, self)
# This order of operations should guarantee correct pipelining.
req.parse_request()
if self.server.stats['Enabled']:
self.requests_seen += 1
if not req.ready:
# Something went wrong in the parsing (and the server has
# probably already made a simple_response). Return and
# let the conn close.
return
request_seen = True
req.respond()
if req.close_connection:
return
except socket.error:
e = sys.exc_info()[1]
errnum = e.args[0]
# sadly SSL sockets return a different (longer) time out string
if (
errnum == 'timed out' or
errnum == 'The read operation timed out'
):
# Don't error if we're between requests; only error
# if 1) no request has been started at all, or 2) we're
# in the middle of a request.
# See https://github.com/cherrypy/cherrypy/issues/853
if (not request_seen) or (req and req.started_request):
# Don't bother writing the 408 if the response
# has already started being written.
if req and not req.sent_headers:
try:
req.simple_response("408 Request Timeout")
except FatalSSLAlert:
# Close the connection.
return
elif errnum not in socket_errors_to_ignore:
self.server.error_log("socket.error %s" % repr(errnum),
level=logging.WARNING, traceback=True)
if req and not req.sent_headers:
try:
req.simple_response("500 Internal Server Error")
except FatalSSLAlert:
# Close the connection.
return
return
except (KeyboardInterrupt, SystemExit):
raise
except FatalSSLAlert:
# Close the connection.
return
except NoSSLError:
if req and not req.sent_headers:
# Unwrap our wfile
self.wfile = CP_fileobject(
self.socket._sock, "wb", self.wbufsize)
req.simple_response(
"400 Bad Request",
"The client sent a plain HTTP request, but "
"this server only speaks HTTPS on this port.")
self.linger = True
except Exception:
e = sys.exc_info()[1]
self.server.error_log(repr(e), level=logging.ERROR, traceback=True)
if req and not req.sent_headers:
try:
req.simple_response("500 Internal Server Error")
except FatalSSLAlert:
# Close the connection.
return
linger = False
def close(self):
"""Close the socket underlying this connection."""
self.rfile.close()
if not self.linger:
# Python's socket module does NOT call close on the kernel
# socket when you call socket.close(). We do so manually here
# because we want this server to send a FIN TCP segment
# immediately. Note this must be called *before* calling
# socket.close(), because the latter drops its reference to
# the kernel socket.
if hasattr(self.socket, '_sock'):
self.socket._sock.close()
self.socket.close()
else:
# On the other hand, sometimes we want to hang around for a bit
# to make sure the client has a chance to read our entire
# response. Skipping the close() calls here delays the FIN
# packet until the socket object is garbage-collected later.
# Someday, perhaps, we'll do the full lingering_close that
# Apache does, but not today.
pass
class TrueyZero(object):
"""An object which equals and does math like the integer 0 but evals True.
"""
def __add__(self, other):
return other
def __radd__(self, other):
return other
trueyzero = TrueyZero()
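# --- Illustrative sketch (not part of the original source): why the worker
# stats below use trueyzero rather than a plain 0 inside the old
# ``cond and X or Y`` idiom. 0 is falsy, so the idiom would fall through to
# Y; trueyzero is truthy yet adds like 0. The helper name is hypothetical.
def _demo_trueyzero(idle, live_value=42):
    broken = 10 + (idle and 0 or live_value)            # 52 even when idle
    correct = 10 + (idle and trueyzero or live_value)   # 10 when idle
    return broken, correct
# _demo_trueyzero(True)  -> (52, 10)
# _demo_trueyzero(False) -> (52, 52)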
_SHUTDOWNREQUEST = None
class WorkerThread(threading.Thread):
"""Thread which continuously polls a Queue for Connection objects.
Due to the timing issues of polling a Queue, a WorkerThread does not
check its own 'ready' flag after it has started. To stop the thread,
it is necessary to stick a _SHUTDOWNREQUEST object onto the Queue
(one for each running WorkerThread).
"""
conn = None
"""The current connection pulled off the Queue, or None."""
server = None
"""The HTTP Server which spawned this thread, and which owns the
Queue and is placing active connections into it."""
ready = False
"""A simple flag for the calling server to know when this thread
has begun polling the Queue."""
def __init__(self, server):
self.ready = False
self.server = server
self.requests_seen = 0
self.bytes_read = 0
self.bytes_written = 0
self.start_time = None
self.work_time = 0
self.stats = {
'Requests': lambda s: self.requests_seen + (
(self.start_time is None) and
trueyzero or
self.conn.requests_seen
),
'Bytes Read': lambda s: self.bytes_read + (
(self.start_time is None) and
trueyzero or
self.conn.rfile.bytes_read
),
'Bytes Written': lambda s: self.bytes_written + (
(self.start_time is None) and
trueyzero or
self.conn.wfile.bytes_written
),
'Work Time': lambda s: self.work_time + (
(self.start_time is None) and
trueyzero or
time.time() - self.start_time
),
'Read Throughput': lambda s: s['Bytes Read'](s) / (
s['Work Time'](s) or 1e-6),
'Write Throughput': lambda s: s['Bytes Written'](s) / (
s['Work Time'](s) or 1e-6),
}
threading.Thread.__init__(self)
def run(self):
self.server.stats['Worker Threads'][self.getName()] = self.stats
try:
self.ready = True
while True:
conn = self.server.requests.get()
if conn is _SHUTDOWNREQUEST:
return
self.conn = conn
if self.server.stats['Enabled']:
self.start_time = time.time()
try:
conn.communicate()
finally:
conn.close()
if self.server.stats['Enabled']:
self.requests_seen += self.conn.requests_seen
self.bytes_read += self.conn.rfile.bytes_read
self.bytes_written += self.conn.wfile.bytes_written
self.work_time += time.time() - self.start_time
self.start_time = None
self.conn = None
except (KeyboardInterrupt, SystemExit):
exc = sys.exc_info()[1]
self.server.interrupt = exc
class ThreadPool(object):
"""A Request Queue for an HTTPServer which pools threads.
ThreadPool objects must provide min, get(), put(obj), start()
and stop(timeout) attributes.
"""
def __init__(self, server, min=10, max=-1,
accepted_queue_size=-1, accepted_queue_timeout=10):
self.server = server
self.min = min
self.max = max
self._threads = []
self._queue = queue.Queue(maxsize=accepted_queue_size)
self._queue_put_timeout = accepted_queue_timeout
self.get = self._queue.get
def start(self):
"""Start the pool of threads."""
for i in range(self.min):
self._threads.append(WorkerThread(self.server))
for worker in self._threads:
worker.setName("CP Server " + worker.getName())
worker.start()
for worker in self._threads:
while not worker.ready:
time.sleep(.1)
def _get_idle(self):
"""Number of worker threads which are idle. Read-only."""
return len([t for t in self._threads if t.conn is None])
idle = property(_get_idle, doc=_get_idle.__doc__)
def put(self, obj):
self._queue.put(obj, block=True, timeout=self._queue_put_timeout)
if obj is _SHUTDOWNREQUEST:
return
def grow(self, amount):
"""Spawn new worker threads (not above self.max)."""
if self.max > 0:
budget = max(self.max - len(self._threads), 0)
else:
# self.max <= 0 indicates no maximum
budget = float('inf')
n_new = min(amount, budget)
workers = [self._spawn_worker() for i in range(n_new)]
while not self._all(operator.attrgetter('ready'), workers):
time.sleep(.1)
self._threads.extend(workers)
def _spawn_worker(self):
worker = WorkerThread(self.server)
worker.setName("CP Server " + worker.getName())
worker.start()
return worker
def _all(func, items):
results = [func(item) for item in items]
return reduce(operator.and_, results, True)
_all = staticmethod(_all)
def shrink(self, amount):
"""Kill off worker threads (not below self.min)."""
# Grow/shrink the pool if necessary.
# Remove any dead threads from our list
for t in self._threads:
if not t.isAlive():
self._threads.remove(t)
amount -= 1
# calculate the number of threads above the minimum
n_extra = max(len(self._threads) - self.min, 0)
# don't remove more than amount
n_to_remove = min(amount, n_extra)
# put shutdown requests on the queue equal to the number of threads
# to remove. As each request is processed by a worker, that worker
# will terminate and be culled from the list.
for n in range(n_to_remove):
self._queue.put(_SHUTDOWNREQUEST)
def stop(self, timeout=5):
# Must shut down threads here so the code that calls
# this method can know when all threads are stopped.
for worker in self._threads:
self._queue.put(_SHUTDOWNREQUEST)
# Don't join currentThread (when stop is called inside a request).
current = threading.currentThread()
if timeout and timeout >= 0:
endtime = time.time() + timeout
while self._threads:
worker = self._threads.pop()
if worker is not current and worker.isAlive():
try:
if timeout is None or timeout < 0:
worker.join()
else:
remaining_time = endtime - time.time()
if remaining_time > 0:
worker.join(remaining_time)
if worker.isAlive():
# We exhausted the timeout.
# Forcibly shut down the socket.
c = worker.conn
if c and not c.rfile.closed:
try:
c.socket.shutdown(socket.SHUT_RD)
except TypeError:
# pyOpenSSL sockets don't take an arg
c.socket.shutdown()
worker.join()
except (AssertionError,
# Ignore repeated Ctrl-C.
# See
# https://github.com/cherrypy/cherrypy/issues/691.
KeyboardInterrupt):
pass
def _get_qsize(self):
return self._queue.qsize()
qsize = property(_get_qsize)
try:
import fcntl
except ImportError:
try:
from ctypes import windll, WinError
import ctypes.wintypes
_SetHandleInformation = windll.kernel32.SetHandleInformation
_SetHandleInformation.argtypes = [
ctypes.wintypes.HANDLE,
ctypes.wintypes.DWORD,
ctypes.wintypes.DWORD,
]
_SetHandleInformation.restype = ctypes.wintypes.BOOL
except ImportError:
def prevent_socket_inheritance(sock):
"""Dummy function, since neither fcntl nor ctypes are available."""
pass
else:
def prevent_socket_inheritance(sock):
"""Mark the given socket fd as non-inheritable (Windows)."""
if not _SetHandleInformation(sock.fileno(), 1, 0):
raise WinError()
else:
def prevent_socket_inheritance(sock):
"""Mark the given socket fd as non-inheritable (POSIX)."""
fd = sock.fileno()
old_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
fcntl.fcntl(fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC)
class SSLAdapter(object):
"""Base class for SSL driver library adapters.
Required methods:
* ``wrap(sock) -> (wrapped socket, ssl environ dict)``
* ``makefile(sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE) ->
socket file object``
"""
def __init__(self, certificate, private_key, certificate_chain=None):
self.certificate = certificate
self.private_key = private_key
self.certificate_chain = certificate_chain
def wrap(self, sock):
raise NotImplementedError
def makefile(self, sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE):
raise NotImplementedError
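# --- Illustrative sketch (not part of the original source): the shape of a
# concrete SSLAdapter. The class below is hypothetical and performs no real
# TLS; the real drivers shipped alongside this module are
# ssl_builtin.BuiltinSSLAdapter and ssl_pyopenssl.pyOpenSSLAdapter.
class _NullSSLAdapter(SSLAdapter):
    def wrap(self, sock):
        # A real adapter returns a TLS-wrapped socket plus WSGI environ
        # entries such as {"HTTPS": "on"}.
        return sock, {}
    def makefile(self, sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE):
        return CP_fileobject(sock, mode, bufsize)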
class HTTPServer(object):
"""An HTTP server."""
_bind_addr = "127.0.0.1"
_interrupt = None
gateway = None
"""A Gateway instance."""
minthreads = None
"""The minimum number of worker threads to create (default 10)."""
maxthreads = None
"""The maximum number of worker threads to create (default -1 = no limit).
"""
server_name = None
"""The name of the server; defaults to socket.gethostname()."""
protocol = "HTTP/1.1"
"""The version string to write in the Status-Line of all HTTP responses.
For example, "HTTP/1.1" is the default. This also limits the supported
features used in the response."""
request_queue_size = 5
"""The 'backlog' arg to socket.listen(); max queued connections
(default 5).
"""
shutdown_timeout = 5
"""The total time, in seconds, to wait for worker threads to cleanly exit.
"""
timeout = 10
"""The timeout in seconds for accepted connections (default 10)."""
version = "CherryPy/" + cp_version
"""A version string for the HTTPServer."""
software = None
"""The value to set for the SERVER_SOFTWARE entry in the WSGI environ.
If None, this defaults to ``'%s Server' % self.version``."""
ready = False
"""An internal flag which marks whether the socket is accepting connections
"""
max_request_header_size = 0
"""The maximum size, in bytes, for request headers, or 0 for no limit."""
max_request_body_size = 0
"""The maximum size, in bytes, for request bodies, or 0 for no limit."""
nodelay = True
"""If True (the default since 3.1), sets the TCP_NODELAY socket option."""
ConnectionClass = HTTPConnection
"""The class to use for handling HTTP connections."""
ssl_adapter = None
"""An instance of SSLAdapter (or a subclass).
You must have the corresponding SSL driver library installed."""
def __init__(self, bind_addr, gateway, minthreads=10, maxthreads=-1,
server_name=None):
self.bind_addr = bind_addr
self.gateway = gateway
self.requests = ThreadPool(self, min=minthreads or 1, max=maxthreads)
if not server_name:
server_name = socket.gethostname()
self.server_name = server_name
self.clear_stats()
def clear_stats(self):
self._start_time = None
self._run_time = 0
self.stats = {
'Enabled': False,
'Bind Address': lambda s: repr(self.bind_addr),
'Run time': lambda s: (not s['Enabled']) and -1 or self.runtime(),
'Accepts': 0,
'Accepts/sec': lambda s: s['Accepts'] / self.runtime(),
'Queue': lambda s: getattr(self.requests, "qsize", None),
'Threads': lambda s: len(getattr(self.requests, "_threads", [])),
'Threads Idle': lambda s: getattr(self.requests, "idle", None),
'Socket Errors': 0,
'Requests': lambda s: (not s['Enabled']) and -1 or sum(
[w['Requests'](w) for w in s['Worker Threads'].values()], 0),
'Bytes Read': lambda s: (not s['Enabled']) and -1 or sum(
[w['Bytes Read'](w) for w in s['Worker Threads'].values()], 0),
'Bytes Written': lambda s: (not s['Enabled']) and -1 or sum(
[w['Bytes Written'](w) for w in s['Worker Threads'].values()],
0),
'Work Time': lambda s: (not s['Enabled']) and -1 or sum(
[w['Work Time'](w) for w in s['Worker Threads'].values()], 0),
'Read Throughput': lambda s: (not s['Enabled']) and -1 or sum(
[w['Bytes Read'](w) / (w['Work Time'](w) or 1e-6)
for w in s['Worker Threads'].values()], 0),
'Write Throughput': lambda s: (not s['Enabled']) and -1 or sum(
[w['Bytes Written'](w) / (w['Work Time'](w) or 1e-6)
for w in s['Worker Threads'].values()], 0),
'Worker Threads': {},
}
logging.statistics["CherryPy HTTPServer %d" % id(self)] = self.stats
def runtime(self):
if self._start_time is None:
return self._run_time
else:
return self._run_time + (time.time() - self._start_time)
def __str__(self):
return "%s.%s(%r)" % (self.__module__, self.__class__.__name__,
self.bind_addr)
def _get_bind_addr(self):
return self._bind_addr
def _set_bind_addr(self, value):
if isinstance(value, tuple) and value[0] in ('', None):
# Despite the socket module docs, using '' does not
# allow AI_PASSIVE to work. Passing None instead
# returns '0.0.0.0' like we want. In other words:
#    host      AI_PASSIVE   result
#     ''           Y        192.168.x.y
#     ''           N        192.168.x.y
#    None          Y        0.0.0.0
#    None          N        127.0.0.1
# But since you can get the same effect with an explicit
# '0.0.0.0', we deny both the empty string and None as values.
raise ValueError("Host values of '' or None are not allowed. "
"Use '0.0.0.0' (IPv4) or '::' (IPv6) instead "
"to listen on all active interfaces.")
self._bind_addr = value
bind_addr = property(
_get_bind_addr,
_set_bind_addr,
doc="""The interface on which to listen for connections.
For TCP sockets, a (host, port) tuple. Host values may be any IPv4
or IPv6 address, or any valid hostname. The string 'localhost' is a
synonym for '127.0.0.1' (or '::1', if your hosts file prefers IPv6).
The string '0.0.0.0' is a special IPv4 entry meaning "any active
interface" (INADDR_ANY), and '::' is the similar IN6ADDR_ANY for
IPv6. The empty string or None are not allowed.
For UNIX sockets, supply the filename as a string.""")
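# --- Illustrative examples (not part of the original source) of bind_addr
# values the property above accepts; ``server`` and the socket path are
# hypothetical placeholders:
#
#     server.bind_addr = ('0.0.0.0', 8080)     # all active IPv4 interfaces
#     server.bind_addr = ('::', 8080)          # all active IPv6 interfaces
#     server.bind_addr = ('127.0.0.1', 8080)   # loopback only
#     server.bind_addr = '/tmp/myapp.sock'     # an AF_UNIX socket file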
def start(self):
"""Run the server forever."""
# We don't have to trap KeyboardInterrupt or SystemExit here,
# because cherrypy.server already does so, calling self.stop() for us.
# If you're using this server with another framework, you should
# trap those exceptions in whatever code block calls start().
self._interrupt = None
if self.software is None:
self.software = "%s Server" % self.version
# Select the appropriate socket
if isinstance(self.bind_addr, basestring):
# AF_UNIX socket
# So we can reuse the socket...
try:
os.unlink(self.bind_addr)
except:
pass
# So everyone can access the socket...
try:
os.chmod(self.bind_addr, 0o777)
except:
pass
info = [
(socket.AF_UNIX, socket.SOCK_STREAM, 0, "", self.bind_addr)]
else:
# AF_INET or AF_INET6 socket
# Get the correct address family for our host (allows IPv6
# addresses)
host, port = self.bind_addr
try:
info = socket.getaddrinfo(
host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM, 0, socket.AI_PASSIVE)
except socket.gaierror:
if ':' in self.bind_addr[0]:
info = [(socket.AF_INET6, socket.SOCK_STREAM,
0, "", self.bind_addr + (0, 0))]
else:
info = [(socket.AF_INET, socket.SOCK_STREAM,
0, "", self.bind_addr)]
self.socket = None
msg = "No socket could be created"
for res in info:
af, socktype, proto, canonname, sa = res
try:
self.bind(af, socktype, proto)
except socket.error, serr:
msg = "%s -- (%s: %s)" % (msg, sa, serr)
if self.socket:
self.socket.close()
self.socket = None
continue
break
if not self.socket:
raise socket.error(msg)
# Timeout so KeyboardInterrupt can be caught on Win32
self.socket.settimeout(1)
self.socket.listen(self.request_queue_size)
# Create worker threads
self.requests.start()
self.ready = True
self._start_time = time.time()
while self.ready:
try:
self.tick()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.error_log("Error in HTTPServer.tick", level=logging.ERROR,
traceback=True)
if self.interrupt:
while self.interrupt is True:
# Wait for self.stop() to complete. See _set_interrupt.
time.sleep(0.1)
if self.interrupt:
raise self.interrupt
def error_log(self, msg="", level=20, traceback=False):
# Override this in subclasses as desired
sys.stderr.write(msg + '\n')
sys.stderr.flush()
if traceback:
tblines = traceback_.format_exc()
sys.stderr.write(tblines)
sys.stderr.flush()
def bind(self, family, type, proto=0):
"""Create (or recreate) the actual socket object."""
self.socket = socket.socket(family, type, proto)
prevent_socket_inheritance(self.socket)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if self.nodelay and not isinstance(self.bind_addr, str):
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if self.ssl_adapter is not None:
self.socket = self.ssl_adapter.bind(self.socket)
# If listening on the IPV6 any address ('::' = IN6ADDR_ANY),
# activate dual-stack. See
# https://github.com/cherrypy/cherrypy/issues/871.
if (hasattr(socket, 'AF_INET6') and family == socket.AF_INET6
and self.bind_addr[0] in ('::', '::0', '::0.0.0.0')):
try:
self.socket.setsockopt(
socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
except (AttributeError, socket.error):
# Apparently, the socket option is not available in
# this machine's TCP stack
pass
self.socket.bind(self.bind_addr)
def tick(self):
"""Accept a new connection and put it on the Queue."""
try:
s, addr = self.socket.accept()
if self.stats['Enabled']:
self.stats['Accepts'] += 1
if not self.ready:
return
prevent_socket_inheritance(s)
if hasattr(s, 'settimeout'):
s.settimeout(self.timeout)
makefile = CP_fileobject
ssl_env = {}
# if ssl cert and key are set, we try to be a secure HTTP server
if self.ssl_adapter is not None:
try:
s, ssl_env = self.ssl_adapter.wrap(s)
except NoSSLError:
msg = ("The client sent a plain HTTP request, but "
"this server only speaks HTTPS on this port.")
buf = ["%s 400 Bad Request\r\n" % self.protocol,
"Content-Length: %s\r\n" % len(msg),
"Content-Type: text/plain\r\n\r\n",
msg]
wfile = makefile(s._sock, "wb", DEFAULT_BUFFER_SIZE)
try:
wfile.sendall("".join(buf))
except socket.error:
x = sys.exc_info()[1]
if x.args[0] not in socket_errors_to_ignore:
raise
return
if not s:
return
makefile = self.ssl_adapter.makefile
# Re-apply our timeout since we may have a new socket object
if hasattr(s, 'settimeout'):
s.settimeout(self.timeout)
conn = self.ConnectionClass(self, s, makefile)
if not isinstance(self.bind_addr, basestring):
# optional values
# Until we do DNS lookups, omit REMOTE_HOST
if addr is None: # sometimes this can happen
# figure out if AF_INET or AF_INET6.
if len(s.getsockname()) == 2:
# AF_INET
addr = ('0.0.0.0', 0)
else:
# AF_INET6
addr = ('::', 0)
conn.remote_addr = addr[0]
conn.remote_port = addr[1]
conn.ssl_env = ssl_env
try:
self.requests.put(conn)
except queue.Full:
# Just drop the conn. TODO: write 503 back?
conn.close()
return
except socket.timeout:
# The only reason for the timeout in start() is so we can
# notice keyboard interrupts on Win32, which don't interrupt
# accept() by default
return
except socket.error:
x = sys.exc_info()[1]
if self.stats['Enabled']:
self.stats['Socket Errors'] += 1
if x.args[0] in socket_error_eintr:
# I *think* this is right. EINTR should occur when a signal
# is received during the accept() call; all docs say retry
# the call, and I *think* I'm reading it right that Python
# will then go ahead and poll for and handle the signal
# elsewhere. See
# https://github.com/cherrypy/cherrypy/issues/707.
return
if x.args[0] in socket_errors_nonblocking:
# Just try again. See
# https://github.com/cherrypy/cherrypy/issues/479.
return
if x.args[0] in socket_errors_to_ignore:
# Our socket was closed.
# See https://github.com/cherrypy/cherrypy/issues/686.
return
raise
def _get_interrupt(self):
return self._interrupt
def _set_interrupt(self, interrupt):
self._interrupt = True
self.stop()
self._interrupt = interrupt
interrupt = property(_get_interrupt, _set_interrupt,
doc="Set this to an Exception instance to "
"interrupt the server.")
def stop(self):
"""Gracefully shutdown a server that is serving forever."""
self.ready = False
if self._start_time is not None:
self._run_time += (time.time() - self._start_time)
self._start_time = None
sock = getattr(self, "socket", None)
if sock:
if not isinstance(self.bind_addr, basestring):
# Touch our own socket to make accept() return immediately.
try:
host, port = sock.getsockname()[:2]
except socket.error:
x = sys.exc_info()[1]
if x.args[0] not in socket_errors_to_ignore:
# Changed to use error code and not message
# See
# https://github.com/cherrypy/cherrypy/issues/860.
raise
else:
# Note that we're explicitly NOT using AI_PASSIVE,
# here, because we want an actual IP to touch.
# localhost won't work if we've bound to a public IP,
# but it will if we bound to '0.0.0.0' (INADDR_ANY).
for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
s = None
try:
s = socket.socket(af, socktype, proto)
# See
# http://groups.google.com/group/cherrypy-users/
# browse_frm/thread/bbfe5eb39c904fe0
s.settimeout(1.0)
s.connect((host, port))
s.close()
except socket.error:
if s:
s.close()
if hasattr(sock, "close"):
sock.close()
self.socket = None
self.requests.stop(self.shutdown_timeout)
class Gateway(object):
"""A base class to interface HTTPServer with other systems, such as WSGI.
"""
def __init__(self, req):
self.req = req
def respond(self):
"""Process the current request. Must be overridden in a subclass."""
raise NotImplementedError
# These may either be wsgiserver.SSLAdapter subclasses or the string names
# of such classes (in which case they will be lazily loaded).
ssl_adapters = {
'builtin': 'cherrypy.wsgiserver.ssl_builtin.BuiltinSSLAdapter',
'pyopenssl': 'cherrypy.wsgiserver.ssl_pyopenssl.pyOpenSSLAdapter',
}
def get_ssl_adapter_class(name='builtin'):
"""Return an SSL adapter class for the given name."""
adapter = ssl_adapters[name.lower()]
if isinstance(adapter, basestring):
last_dot = adapter.rfind(".")
attr_name = adapter[last_dot + 1:]
mod_path = adapter[:last_dot]
try:
mod = sys.modules[mod_path]
if mod is None:
raise KeyError()
except KeyError:
# The last [''] is important.
mod = __import__(mod_path, globals(), locals(), [''])
# Let an AttributeError propagate outward.
try:
adapter = getattr(mod, attr_name)
except AttributeError:
raise AttributeError("'%s' object has no attribute '%s'"
% (mod_path, attr_name))
return adapter
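# --- Illustrative sketch (not part of the original source): resolving an
# adapter by name and installing it on a server. The helper name and the
# certificate/key paths are hypothetical; resolution follows the
# ssl_adapters table above, so the referenced module must be importable.
def _demo_enable_https(server, cert='server.crt', key='server.key'):
    adapter_cls = get_ssl_adapter_class('builtin')   # or 'pyopenssl'
    server.ssl_adapter = adapter_cls(cert, key)
    return server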
# ------------------------------- WSGI Stuff -------------------------------- #
class CherryPyWSGIServer(HTTPServer):
"""A subclass of HTTPServer which calls a WSGI application."""
wsgi_version = (1, 0)
"""The version of WSGI to produce."""
def __init__(self, bind_addr, wsgi_app, numthreads=10, server_name=None,
max=-1, request_queue_size=5, timeout=10, shutdown_timeout=5,
accepted_queue_size=-1, accepted_queue_timeout=10):
self.requests = ThreadPool(self, min=numthreads or 1, max=max,
accepted_queue_size=accepted_queue_size,
accepted_queue_timeout=accepted_queue_timeout)
self.wsgi_app = wsgi_app
self.gateway = wsgi_gateways[self.wsgi_version]
self.bind_addr = bind_addr
if not server_name:
server_name = socket.gethostname()
self.server_name = server_name
self.request_queue_size = request_queue_size
self.timeout = timeout
self.shutdown_timeout = shutdown_timeout
self.clear_stats()
def _get_numthreads(self):
return self.requests.min
def _set_numthreads(self, value):
self.requests.min = value
numthreads = property(_get_numthreads, _set_numthreads)
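# --- Illustrative sketch (not part of the original source): the minimal
# way to serve a WSGI callable with CherryPyWSGIServer. The host, port and
# application are placeholders; start() blocks until stop() is called or an
# interrupt is raised.
def _demo_serve_wsgi():
    def hello_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['Hello world!\n']
    server = CherryPyWSGIServer(('0.0.0.0', 8070), hello_app, numthreads=10)
    try:
        server.start()
    except KeyboardInterrupt:
        server.stop()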
class WSGIGateway(Gateway):
"""A base class to interface HTTPServer with WSGI."""
def __init__(self, req):
self.req = req
self.started_response = False
self.env = self.get_environ()
self.remaining_bytes_out = None
def get_environ(self):
"""Return a new environ dict targeting the given wsgi.version"""
raise NotImplementedError
def respond(self):
"""Process the current request."""
response = self.req.server.wsgi_app(self.env, self.start_response)
try:
for chunk in response:
# "The start_response callable must not actually transmit
# the response headers. Instead, it must store them for the
# server or gateway to transmit only after the first
# iteration of the application return value that yields
# a NON-EMPTY string, or upon the application's first
# invocation of the write() callable." (PEP 333)
if chunk:
if isinstance(chunk, unicodestr):
chunk = chunk.encode('ISO-8859-1')
self.write(chunk)
finally:
if hasattr(response, "close"):
response.close()
def start_response(self, status, headers, exc_info=None):
"""WSGI callable to begin the HTTP response."""
# "The application may call start_response more than once,
# if and only if the exc_info argument is provided."
if self.started_response and not exc_info:
raise AssertionError("WSGI start_response called a second "
"time with no exc_info.")
self.started_response = True
# "if exc_info is provided, and the HTTP headers have already been
# sent, start_response must raise an error, and should raise the
# exc_info tuple."
if self.req.sent_headers:
try:
raise exc_info[0], exc_info[1], exc_info[2]
finally:
exc_info = None
self.req.status = status
for k, v in headers:
if not isinstance(k, str):
raise TypeError(
"WSGI response header key %r is not of type str." % k)
if not isinstance(v, str):
raise TypeError(
"WSGI response header value %r is not of type str." % v)
if k.lower() == 'content-length':
self.remaining_bytes_out = int(v)
self.req.outheaders.extend(headers)
return self.write
def write(self, chunk):
"""WSGI callable to write unbuffered data to the client.
This method is also used internally by start_response (to write
data from the iterable returned by the WSGI application).
"""
if not self.started_response:
raise AssertionError("WSGI write called before start_response.")
chunklen = len(chunk)
rbo = self.remaining_bytes_out
if rbo is not None and chunklen > rbo:
if not self.req.sent_headers:
# Whew. We can send a 500 to the client.
self.req.simple_response(
"500 Internal Server Error",
"The requested resource returned more bytes than the "
"declared Content-Length.")
else:
# Dang. We have probably already sent data. Truncate the chunk
# to fit (so the client doesn't hang) and raise an error later.
chunk = chunk[:rbo]
if not self.req.sent_headers:
self.req.sent_headers = True
self.req.send_headers()
self.req.write(chunk)
if rbo is not None:
rbo -= chunklen
if rbo < 0:
raise ValueError(
"Response body exceeds the declared Content-Length.")
class WSGIGateway_10(WSGIGateway):
"""A Gateway class to interface HTTPServer with WSGI 1.0.x."""
def get_environ(self):
"""Return a new environ dict targeting the given wsgi.version"""
req = self.req
env = {
# set a non-standard environ entry so the WSGI app can know what
# the *real* server protocol is (and what features to support).
# See http://www.faqs.org/rfcs/rfc2145.html.
'ACTUAL_SERVER_PROTOCOL': req.server.protocol,
'PATH_INFO': req.path,
'QUERY_STRING': req.qs,
'REMOTE_ADDR': req.conn.remote_addr or '',
'REMOTE_PORT': str(req.conn.remote_port or ''),
'REQUEST_METHOD': req.method,
'REQUEST_URI': req.uri,
'SCRIPT_NAME': '',
'SERVER_NAME': req.server.server_name,
# Bah. "SERVER_PROTOCOL" is actually the REQUEST protocol.
'SERVER_PROTOCOL': req.request_protocol,
'SERVER_SOFTWARE': req.server.software,
'wsgi.errors': sys.stderr,
'wsgi.input': req.rfile,
'wsgi.multiprocess': False,
'wsgi.multithread': True,
'wsgi.run_once': False,
'wsgi.url_scheme': req.scheme,
'wsgi.version': (1, 0),
}
if isinstance(req.server.bind_addr, basestring):
# AF_UNIX. This isn't really allowed by WSGI, which doesn't
# address unix domain sockets. But it's better than nothing.
env["SERVER_PORT"] = ""
else:
env["SERVER_PORT"] = str(req.server.bind_addr[1])
# Request headers
for k, v in req.inheaders.iteritems():
env["HTTP_" + k.upper().replace("-", "_")] = v
# CONTENT_TYPE/CONTENT_LENGTH
ct = env.pop("HTTP_CONTENT_TYPE", None)
if ct is not None:
env["CONTENT_TYPE"] = ct
cl = env.pop("HTTP_CONTENT_LENGTH", None)
if cl is not None:
env["CONTENT_LENGTH"] = cl
if req.conn.ssl_env:
env.update(req.conn.ssl_env)
return env
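# --- Illustrative sketch (not part of the original source): the
# header-to-environ mapping performed by get_environ() above. Ordinary
# request headers gain an HTTP_ prefix with dashes mapped to underscores,
# while Content-Type and Content-Length become the bare CGI keys. The
# helper name is hypothetical.
def _demo_headers_to_environ(inheaders):
    env = dict(("HTTP_" + k.upper().replace("-", "_"), v)
               for k, v in inheaders.items())
    for cgi_key in ("CONTENT_TYPE", "CONTENT_LENGTH"):
        if "HTTP_" + cgi_key in env:
            env[cgi_key] = env.pop("HTTP_" + cgi_key)
    return env
# _demo_headers_to_environ({"Host": "example.org", "Content-Type": "text/plain"})
# -> {"HTTP_HOST": "example.org", "CONTENT_TYPE": "text/plain"}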
class WSGIGateway_u0(WSGIGateway_10):
"""A Gateway class to interface HTTPServer with WSGI u.0.
WSGI u.0 is an experimental protocol, which uses unicode for keys and
values in both Python 2 and Python 3.
"""
def get_environ(self):
"""Return a new environ dict targeting the given wsgi.version"""
req = self.req
env_10 = WSGIGateway_10.get_environ(self)
env = dict([(k.decode('ISO-8859-1'), v)
for k, v in env_10.iteritems()])
env[u'wsgi.version'] = ('u', 0)
# Request-URI
env.setdefault(u'wsgi.url_encoding', u'utf-8')
try:
for key in [u"PATH_INFO", u"SCRIPT_NAME", u"QUERY_STRING"]:
env[key] = env_10[str(key)].decode(env[u'wsgi.url_encoding'])
except UnicodeDecodeError:
# Fall back to latin 1 so apps can transcode if needed.
env[u'wsgi.url_encoding'] = u'ISO-8859-1'
for key in [u"PATH_INFO", u"SCRIPT_NAME", u"QUERY_STRING"]:
env[key] = env_10[str(key)].decode(env[u'wsgi.url_encoding'])
for k, v in sorted(env.items()):
if isinstance(v, str) and k not in ('REQUEST_URI', 'wsgi.input'):
env[k] = v.decode('ISO-8859-1')
return env
wsgi_gateways = {
(1, 0): WSGIGateway_10,
('u', 0): WSGIGateway_u0,
}
class WSGIPathInfoDispatcher(object):
"""A WSGI dispatcher for dispatch based on the PATH_INFO.
apps: a dict or list of (path_prefix, app) pairs.
"""
def __init__(self, apps):
try:
apps = list(apps.items())
except AttributeError:
pass
# Sort the apps by len(path), descending
apps.sort(cmp=lambda x, y: cmp(len(x[0]), len(y[0])))
apps.reverse()
# The path_prefix strings must start, but not end, with a slash.
# Use "" instead of "/".
self.apps = [(p.rstrip("/"), a) for p, a in apps]
def __call__(self, environ, start_response):
path = environ["PATH_INFO"] or "/"
for p, app in self.apps:
# The apps list should be sorted by length, descending.
if path.startswith(p + "/") or path == p:
environ = environ.copy()
environ["SCRIPT_NAME"] = environ["SCRIPT_NAME"] + p
environ["PATH_INFO"] = path[len(p):]
return app(environ, start_response)
start_response('404 Not Found', [('Content-Type', 'text/plain'),
('Content-Length', '0')])
return ['']
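# --- Illustrative sketch (not part of the original source): dispatching two
# WSGI apps by path prefix with the class above. The applications and the
# port are hypothetical placeholders.
def _demo_dispatch(root_app, blog_app):
    d = WSGIPathInfoDispatcher({'/': root_app, '/blog': blog_app})
    # A request for /blog/2017/01 reaches blog_app with SCRIPT_NAME='/blog'
    # and PATH_INFO='/2017/01'; any other path falls through to root_app.
    return CherryPyWSGIServer(('0.0.0.0', 8080), d)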
webpy/web/wsgiserver/__init__.py
__all__ = ['HTTPRequest', 'HTTPConnection', 'HTTPServer',
'SizeCheckWrapper', 'KnownLengthRFile', 'ChunkedRFile',
'MaxSizeExceeded', 'NoSSLError', 'FatalSSLAlert',
'WorkerThread', 'ThreadPool', 'SSLAdapter',
'CherryPyWSGIServer',
'Gateway', 'WSGIGateway', 'WSGIGateway_10', 'WSGIGateway_u0',
'WSGIPathInfoDispatcher', 'get_ssl_adapter_class']
import sys
if sys.version_info < (3, 0):
from wsgiserver2 import *
else:
# Le sigh. Boo for backward-incompatible syntax.
exec('from .wsgiserver3 import *')
webpy/web/wsgiserver/ssl_pyopenssl.py
"""A library for integrating pyOpenSSL with CherryPy.
The OpenSSL module must be importable for SSL functionality.
You can obtain it from `here <https://launchpad.net/pyopenssl>`_.
To use this module, set CherryPyWSGIServer.ssl_adapter to an instance of
SSLAdapter. There are two ways to use SSL:
Method One
----------
* ``ssl_adapter.context``: an instance of SSL.Context.
If this is not None, it is assumed to be an SSL.Context instance,
and will be passed to SSL.Connection on bind(). The developer is
responsible for forming a valid Context object. This approach is
to be preferred for more flexibility, e.g. if the cert and key are
streams instead of files, or need decryption, or SSL.SSLv3_METHOD
is desired instead of the default SSL.SSLv23_METHOD, etc. Consult
the pyOpenSSL documentation for complete options.
Method Two (shortcut)
---------------------
* ``ssl_adapter.certificate``: the filename of the server SSL certificate.
* ``ssl_adapter.private_key``: the filename of the server's private key file.
Both are None by default. If ssl_adapter.context is None, but .private_key
and .certificate are both given and valid, they will be read, and the
context will be automatically created from them.
"""
import socket
import threading
import time
from cherrypy import wsgiserver
try:
from OpenSSL import SSL
from OpenSSL import crypto
except ImportError:
SSL = None
class SSL_fileobject(wsgiserver.CP_fileobject):
"""SSL file object attached to a socket object."""
ssl_timeout = 3
ssl_retry = .01
def _safe_call(self, is_reader, call, *args, **kwargs):
"""Wrap the given call with SSL error-trapping.
is_reader: if False EOF errors will be raised. If True, EOF errors
will return "" (to emulate normal sockets).
"""
start = time.time()
while True:
try:
return call(*args, **kwargs)
except SSL.WantReadError:
# Sleep and try again. This is dangerous, because it means
# the rest of the stack has no way of differentiating
# between a "new handshake" error and "client dropped".
# Note this isn't an endless loop: there's a timeout below.
time.sleep(self.ssl_retry)
except SSL.WantWriteError:
time.sleep(self.ssl_retry)
except SSL.SysCallError as e:
if is_reader and e.args == (-1, 'Unexpected EOF'):
return ""
errnum = e.args[0]
if is_reader and errnum in wsgiserver.socket_errors_to_ignore:
return ""
raise socket.error(errnum)
except SSL.Error as e:
if is_reader and e.args == (-1, 'Unexpected EOF'):
return ""
thirdarg = None
try:
thirdarg = e.args[0][0][2]
except IndexError:
pass
if thirdarg == 'http request':
# The client is talking HTTP to an HTTPS server.
raise wsgiserver.NoSSLError()
raise wsgiserver.FatalSSLAlert(*e.args)
except:
raise
if time.time() - start > self.ssl_timeout:
raise socket.timeout("timed out")
def recv(self, size):
return self._safe_call(True, super(SSL_fileobject, self).recv, size)
def sendall(self, *args, **kwargs):
return self._safe_call(False, super(SSL_fileobject, self).sendall,
*args, **kwargs)
def send(self, *args, **kwargs):
return self._safe_call(False, super(SSL_fileobject, self).send,
*args, **kwargs)
class SSLConnection:
"""A thread-safe wrapper for an SSL.Connection.
``*args``: the arguments to create the wrapped ``SSL.Connection(*args)``.
"""
def __init__(self, *args):
self._ssl_conn = SSL.Connection(*args)
self._lock = threading.RLock()
for f in ('get_context', 'pending', 'send', 'write', 'recv', 'read',
'renegotiate', 'bind', 'listen', 'connect', 'accept',
'setblocking', 'fileno', 'close', 'get_cipher_list',
'getpeername', 'getsockname', 'getsockopt', 'setsockopt',
'makefile', 'get_app_data', 'set_app_data', 'state_string',
'sock_shutdown', 'get_peer_certificate', 'want_read',
'want_write', 'set_connect_state', 'set_accept_state',
'connect_ex', 'sendall', 'settimeout', 'gettimeout'):
exec("""def %s(self, *args):
self._lock.acquire()
try:
return self._ssl_conn.%s(*args)
finally:
self._lock.release()
""" % (f, f))
def shutdown(self, *args):
self._lock.acquire()
try:
# pyOpenSSL.socket.shutdown takes no args
return self._ssl_conn.shutdown()
finally:
self._lock.release()
class pyOpenSSLAdapter(wsgiserver.SSLAdapter):
"""A wrapper for integrating pyOpenSSL with CherryPy."""
context = None
"""An instance of SSL.Context."""
certificate = None
"""The filename of the server SSL certificate."""
private_key = None
"""The filename of the server's private key file."""
certificate_chain = None
"""Optional. The filename of CA's intermediate certificate bundle.
This is needed for cheaper "chained root" SSL certificates, and should be
left as None if not required."""
def __init__(self, certificate, private_key, certificate_chain=None):
if SSL is None:
raise ImportError("You must install pyOpenSSL to use HTTPS.")
self.context = None
self.certificate = certificate
self.private_key = private_key
self.certificate_chain = certificate_chain
self._environ = None
def bind(self, sock):
"""Wrap and return the given socket."""
if self.context is None:
self.context = self.get_context()
conn = SSLConnection(self.context, sock)
self._environ = self.get_environ()
return conn
def wrap(self, sock):
"""Wrap and return the given socket, plus WSGI environ entries."""
return sock, self._environ.copy()
def get_context(self):
"""Return an SSL.Context from self attributes."""
# See http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/442473
c = SSL.Context(SSL.SSLv23_METHOD)
c.use_privatekey_file(self.private_key)
if self.certificate_chain:
c.load_verify_locations(self.certificate_chain)
c.use_certificate_file(self.certificate)
return c
def get_environ(self):
"""Return WSGI environ entries to be merged into each request."""
ssl_environ = {
"HTTPS": "on",
# pyOpenSSL doesn't provide access to any of these AFAICT
# 'SSL_PROTOCOL': 'SSLv2',
# SSL_CIPHER string The cipher specification name
# SSL_VERSION_INTERFACE string The mod_ssl program version
# SSL_VERSION_LIBRARY string The OpenSSL program version
}
if self.certificate:
# Server certificate attributes
cert = open(self.certificate, 'rb').read()
cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert)
ssl_environ.update({
'SSL_SERVER_M_VERSION': cert.get_version(),
'SSL_SERVER_M_SERIAL': cert.get_serial_number(),
# 'SSL_SERVER_V_START':
# Validity of server's certificate (start time),
# 'SSL_SERVER_V_END':
# Validity of server's certificate (end time),
})
for prefix, dn in [("I", cert.get_issuer()),
("S", cert.get_subject())]:
# X509Name objects don't seem to have a way to get the
# complete DN string. Use str() and slice it instead,
# because str(dn) == "<X509Name object '/C=US/ST=...'>"
dnstr = str(dn)[18:-2]
wsgikey = 'SSL_SERVER_%s_DN' % prefix
ssl_environ[wsgikey] = dnstr
# The DN should be of the form: /k1=v1/k2=v2, but we must allow
# for any value to contain slashes itself (in a URL).
while dnstr:
pos = dnstr.rfind("=")
dnstr, value = dnstr[:pos], dnstr[pos + 1:]
pos = dnstr.rfind("/")
dnstr, key = dnstr[:pos], dnstr[pos + 1:]
if key and value:
wsgikey = 'SSL_SERVER_%s_DN_%s' % (prefix, key)
ssl_environ[wsgikey] = value
return ssl_environ
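# --- Illustrative sketch (not part of the original source): how the
# right-to-left parsing above decomposes a DN string. The helper name and
# the sample DN are hypothetical.
def _demo_parse_dn(dnstr="/C=US/ST=CA/O=Acme/CN=www.example.com"):
    parts = {}
    while dnstr:
        pos = dnstr.rfind("=")
        dnstr, value = dnstr[:pos], dnstr[pos + 1:]
        pos = dnstr.rfind("/")
        dnstr, key = dnstr[:pos], dnstr[pos + 1:]
        if key and value:
            parts[key] = value
    return parts
# -> {'C': 'US', 'ST': 'CA', 'O': 'Acme', 'CN': 'www.example.com'}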
def makefile(self, sock, mode='r', bufsize=-1):
if SSL and isinstance(sock, SSL.ConnectionType):
timeout = sock.gettimeout()
f = SSL_fileobject(sock, mode, bufsize)
f.ssl_timeout = timeout
return f
else:
return wsgiserver.CP_fileobject(sock, mode, bufsize)
webpy/web/wsgiserver/ssl_builtin.py
"""A library for integrating Python's builtin ``ssl`` library with CherryPy.
The ssl module must be importable for SSL functionality.
To use this module, set ``CherryPyWSGIServer.ssl_adapter`` to an instance of
``BuiltinSSLAdapter``.
"""
try:
import ssl
except ImportError:
ssl = None
try:
from _pyio import DEFAULT_BUFFER_SIZE
except ImportError:
try:
from io import DEFAULT_BUFFER_SIZE
except ImportError:
DEFAULT_BUFFER_SIZE = -1
import sys
from cherrypy import wsgiserver
class BuiltinSSLAdapter(wsgiserver.SSLAdapter):
"""A wrapper for integrating Python's builtin ssl module with CherryPy."""
certificate = None
"""The filename of the server SSL certificate."""
private_key = None
"""The filename of the server's private key file."""
certificate_chain = None
"""The filename of the certificate chain file."""
"""The ssl.SSLContext that will be used to wrap sockets where available
(on Python > 2.7.9 / 3.3)
"""
context = None
def __init__(self, certificate, private_key, certificate_chain=None):
if ssl is None:
raise ImportError("You must install the ssl module to use HTTPS.")
self.certificate = certificate
self.private_key = private_key
self.certificate_chain = certificate_chain
if hasattr(ssl, 'create_default_context'):
self.context = ssl.create_default_context(
purpose=ssl.Purpose.CLIENT_AUTH,
cafile=certificate_chain
)
self.context.load_cert_chain(certificate, private_key)
def bind(self, sock):
"""Wrap and return the given socket."""
return sock
def wrap(self, sock):
"""Wrap and return the given socket, plus WSGI environ entries."""
try:
if self.context is not None:
s = self.context.wrap_socket(sock, do_handshake_on_connect=True,
server_side=True)
else:
s = ssl.wrap_socket(sock, do_handshake_on_connect=True,
server_side=True, certfile=self.certificate,
keyfile=self.private_key,
ssl_version=ssl.PROTOCOL_SSLv23,
ca_certs=self.certificate_chain)
except ssl.SSLError:
e = sys.exc_info()[1]
if e.errno == ssl.SSL_ERROR_EOF:
# This is almost certainly due to the cherrypy engine
# 'pinging' the socket to assert it's connectable;
# the 'ping' isn't SSL.
return None, {}
elif e.errno == ssl.SSL_ERROR_SSL:
if e.args[1].endswith('http request'):
# The client is speaking HTTP to an HTTPS server.
raise wsgiserver.NoSSLError
elif e.args[1].endswith('unknown protocol'):
# The client is speaking some non-HTTP protocol.
# Drop the conn.
return None, {}
raise
return s, self.get_environ(s)
# TODO: fill this out more with mod ssl env
def get_environ(self, sock):
"""Create WSGI environ entries to be merged into each request."""
cipher = sock.cipher()
ssl_environ = {
"wsgi.url_scheme": "https",
"HTTPS": "on",
'SSL_PROTOCOL': cipher[1],
'SSL_CIPHER': cipher[0]
# SSL_VERSION_INTERFACE string The mod_ssl program version
# SSL_VERSION_LIBRARY string The OpenSSL program version
}
return ssl_environ
if sys.version_info >= (3, 0):
def makefile(self, sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE):
return wsgiserver.CP_makefile(sock, mode, bufsize)
else:
def makefile(self, sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE):
return wsgiserver.CP_fileobject(sock, mode, bufsize)
webpy/web/wsgiserver/wsgiserver3.py
"""A high-speed, production ready, thread pooled, generic HTTP server.
Simplest example on how to use this module directly
(without using CherryPy's application machinery)::
from cherrypy import wsgiserver
def my_crazy_app(environ, start_response):
status = '200 OK'
response_headers = [('Content-type','text/plain')]
start_response(status, response_headers)
return ['Hello world!']
server = wsgiserver.CherryPyWSGIServer(
('0.0.0.0', 8070), my_crazy_app,
server_name='www.cherrypy.example')
server.start()
The CherryPy WSGI server can serve as many WSGI applications
as you want in one instance by using a WSGIPathInfoDispatcher::
d = WSGIPathInfoDispatcher({'/': my_crazy_app, '/blog': my_blog_app})
server = wsgiserver.CherryPyWSGIServer(('0.0.0.0', 80), d)
Want SSL support? Just set server.ssl_adapter to an SSLAdapter instance.
This won't call the CherryPy engine (application side) at all, only the
HTTP server, which is independent from the rest of CherryPy. Don't
let the name "CherryPyWSGIServer" throw you; the name merely reflects
its origin, not its coupling.
For those of you wanting to understand internals of this module, here's the
basic call flow. The server's listening thread runs a very tight loop,
sticking incoming connections onto a Queue::
server = CherryPyWSGIServer(...)
server.start()
while True:
tick()
# This blocks until a request comes in:
child = socket.accept()
conn = HTTPConnection(child, ...)
server.requests.put(conn)
Worker threads are kept in a pool and poll the Queue, popping off and then
handling each connection in turn. Each connection can consist of an arbitrary
number of requests and their responses, so we run a nested loop::
while True:
conn = server.requests.get()
conn.communicate()
-> while True:
req = HTTPRequest(...)
req.parse_request()
-> # Read the Request-Line, e.g. "GET /page HTTP/1.1"
req.rfile.readline()
read_headers(req.rfile, req.inheaders)
req.respond()
-> response = app(...)
try:
for chunk in response:
if chunk:
req.write(chunk)
finally:
if hasattr(response, "close"):
response.close()
if req.close_connection:
return
"""
__all__ = ['HTTPRequest', 'HTTPConnection', 'HTTPServer',
'SizeCheckWrapper', 'KnownLengthRFile', 'ChunkedRFile',
'CP_makefile',
'MaxSizeExceeded', 'NoSSLError', 'FatalSSLAlert',
'WorkerThread', 'ThreadPool', 'SSLAdapter',
'CherryPyWSGIServer',
'Gateway', 'WSGIGateway', 'WSGIGateway_10', 'WSGIGateway_u0',
'WSGIPathInfoDispatcher', 'get_ssl_adapter_class',
'socket_errors_to_ignore']
import os
try:
import queue
except ImportError:
import Queue as queue
import re
import email.utils
import socket
import sys
import threading
import time
import traceback as traceback_
import errno
import logging
from urllib.parse import urlparse
try:
# prefer slower Python-based io module
import _pyio as io
except ImportError:
# Python 2.6
import io
try:
import pkg_resources
except ImportError:
pass
if sys.platform.startswith('win') and hasattr(socket, "AF_INET6"):
if not hasattr(socket, 'IPPROTO_IPV6'):
socket.IPPROTO_IPV6 = 41
if not hasattr(socket, 'IPV6_V6ONLY'):
socket.IPV6_V6ONLY = 27
DEFAULT_BUFFER_SIZE = io.DEFAULT_BUFFER_SIZE
try:
cp_version = pkg_resources.require('cherrypy')[0].version
except Exception:
cp_version = 'unknown'
if sys.version_info >= (3, 0):
unicodestr = str
basestring = (bytes, str)
def ntob(n, encoding='ISO-8859-1'):
"""Return the given native string as a byte string in the given
encoding.
"""
# In Python 3, the native string type is unicode
return n.encode(encoding)
else:
unicodestr = unicode
basestring = basestring
def ntob(n, encoding='ISO-8859-1'):
"""Return the given native string as a byte string in the given
encoding.
"""
# In Python 2, the native string type is bytes. Assume it's already
# in the given encoding, which for ISO-8859-1 is almost always what
# was intended.
return n
LF = ntob('\n')
CRLF = ntob('\r\n')
TAB = ntob('\t')
SPACE = ntob(' ')
COLON = ntob(':')
SEMICOLON = ntob(';')
EMPTY = ntob('')
NUMBER_SIGN = ntob('#')
QUESTION_MARK = ntob('?')
ASTERISK = ntob('*')
FORWARD_SLASH = ntob('/')
quoted_slash = re.compile(ntob("(?i)%2F"))
def plat_specific_errors(*errnames):
"""Return error numbers for all errors in errnames on this platform.
The 'errno' module contains different global constants depending on
the specific platform (OS). This function will return the list of
numeric values for a given list of potential names.
"""
errno_names = dir(errno)
nums = [getattr(errno, k) for k in errnames if k in errno_names]
# de-dupe the list
return list(dict.fromkeys(nums).keys())
socket_error_eintr = plat_specific_errors("EINTR", "WSAEINTR")
socket_errors_to_ignore = plat_specific_errors(
"EPIPE",
"EBADF", "WSAEBADF",
"ENOTSOCK", "WSAENOTSOCK",
"ETIMEDOUT", "WSAETIMEDOUT",
"ECONNREFUSED", "WSAECONNREFUSED",
"ECONNRESET", "WSAECONNRESET",
"ECONNABORTED", "WSAECONNABORTED",
"ENETRESET", "WSAENETRESET",
"EHOSTDOWN", "EHOSTUNREACH",
)
socket_errors_to_ignore.append("timed out")
socket_errors_to_ignore.append("The read operation timed out")
if sys.platform == 'darwin':
socket_errors_to_ignore.extend(plat_specific_errors("EPROTOTYPE"))
socket_errors_nonblocking = plat_specific_errors(
'EAGAIN', 'EWOULDBLOCK', 'WSAEWOULDBLOCK')
comma_separated_headers = [
ntob(h) for h in
['Accept', 'Accept-Charset', 'Accept-Encoding',
'Accept-Language', 'Accept-Ranges', 'Allow', 'Cache-Control',
'Connection', 'Content-Encoding', 'Content-Language', 'Expect',
'If-Match', 'If-None-Match', 'Pragma', 'Proxy-Authenticate', 'TE',
'Trailer', 'Transfer-Encoding', 'Upgrade', 'Vary', 'Via', 'Warning',
'WWW-Authenticate']
]
if not hasattr(logging, 'statistics'):
logging.statistics = {}
def read_headers(rfile, hdict=None):
"""Read headers from the given stream into the given header dict.
If hdict is None, a new header dict is created. Returns the populated
header dict.
Headers which are repeated are folded together using a comma if their
specification so dictates.
This function raises ValueError when the read bytes violate the HTTP spec.
You should probably return "400 Bad Request" if this happens.
"""
if hdict is None:
hdict = {}
while True:
line = rfile.readline()
if not line:
# No more data--illegal end of headers
raise ValueError("Illegal end of headers.")
if line == CRLF:
# Normal end of headers
break
if not line.endswith(CRLF):
raise ValueError("HTTP requires CRLF terminators")
# Slice rather than index so the comparison also works on Python 3,
# where indexing a bytes object yields an int.
if line[:1] in (SPACE, TAB):
# It's a continuation line.
v = line.strip()
else:
try:
k, v = line.split(COLON, 1)
except ValueError:
raise ValueError("Illegal header line.")
# TODO: what about TE and WWW-Authenticate?
k = k.strip().title()
v = v.strip()
hname = k
if k in comma_separated_headers:
existing = hdict.get(hname)
if existing:
v = b", ".join((existing, v))
hdict[hname] = v
return hdict
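# Illustrative sketch (comments only) of the folding behaviour, with an
# in-memory stream standing in for the connection's rfile:
#
#     import io
#     read_headers(io.BytesIO(b"Accept: text/html\r\nAccept: */*\r\n\r\n"))
#     # -> {b'Accept': b'text/html, */*'}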
class MaxSizeExceeded(Exception):
pass
class SizeCheckWrapper(object):
"""Wraps a file-like object, raising MaxSizeExceeded if too large."""
def __init__(self, rfile, maxlen):
self.rfile = rfile
self.maxlen = maxlen
self.bytes_read = 0
def _check_length(self):
if self.maxlen and self.bytes_read > self.maxlen:
raise MaxSizeExceeded()
def read(self, size=None):
data = self.rfile.read(size)
self.bytes_read += len(data)
self._check_length()
return data
def readline(self, size=None):
if size is not None:
data = self.rfile.readline(size)
self.bytes_read += len(data)
self._check_length()
return data
# User didn't specify a size ...
# We read the line in chunks to make sure it's not a 100MB line !
res = []
while True:
data = self.rfile.readline(256)
self.bytes_read += len(data)
self._check_length()
res.append(data)
# See https://github.com/cherrypy/cherrypy/issues/421
if len(data) < 256 or data[-1:] == LF:
return EMPTY.join(res)
def readlines(self, sizehint=0):
# Shamelessly stolen from StringIO
total = 0
lines = []
line = self.readline()
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline()
return lines
def close(self):
self.rfile.close()
def __iter__(self):
return self
def __next__(self):
data = next(self.rfile)
self.bytes_read += len(data)
self._check_length()
return data
def next(self):
data = self.rfile.next()
self.bytes_read += len(data)
self._check_length()
return data
class KnownLengthRFile(object):
"""Wraps a file-like object, returning an empty string when exhausted."""
def __init__(self, rfile, content_length):
self.rfile = rfile
self.remaining = content_length
def read(self, size=None):
if self.remaining == 0:
return b''
if size is None:
size = self.remaining
else:
size = min(size, self.remaining)
data = self.rfile.read(size)
self.remaining -= len(data)
return data
def readline(self, size=None):
if self.remaining == 0:
return b''
if size is None:
size = self.remaining
else:
size = min(size, self.remaining)
data = self.rfile.readline(size)
self.remaining -= len(data)
return data
def readlines(self, sizehint=0):
# Shamelessly stolen from StringIO
total = 0
lines = []
line = self.readline(sizehint)
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline(sizehint)
return lines
def close(self):
self.rfile.close()
def __iter__(self):
return self
def __next__(self):
data = next(self.rfile)
self.remaining -= len(data)
return data
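# Illustrative sketch (comments only), with an in-memory stream standing in
# for the connection's rfile and a declared Content-Length of 5:
#
#     import io
#     body = KnownLengthRFile(io.BytesIO(b"hello world"), 5)
#     body.read()   # -> b'hello'  (capped at the declared length)
#     body.read()   # -> b''       (exhausted)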
class ChunkedRFile(object):
"""Wraps a file-like object, returning an empty string when exhausted.
This class is intended to provide a conforming wsgi.input value for
request entities that have been encoded with the 'chunked' transfer
encoding.
"""
def __init__(self, rfile, maxlen, bufsize=8192):
self.rfile = rfile
self.maxlen = maxlen
self.bytes_read = 0
self.buffer = EMPTY
self.bufsize = bufsize
self.closed = False
def _fetch(self):
if self.closed:
return
line = self.rfile.readline()
self.bytes_read += len(line)
if self.maxlen and self.bytes_read > self.maxlen:
raise MaxSizeExceeded("Request Entity Too Large", self.maxlen)
line = line.strip().split(SEMICOLON, 1)
try:
chunk_size = line.pop(0)
chunk_size = int(chunk_size, 16)
except ValueError:
raise ValueError("Bad chunked transfer size: " + repr(chunk_size))
if chunk_size <= 0:
self.closed = True
return
## if line: chunk_extension = line[0]
if self.maxlen and self.bytes_read + chunk_size > self.maxlen:
raise IOError("Request Entity Too Large")
chunk = self.rfile.read(chunk_size)
self.bytes_read += len(chunk)
self.buffer += chunk
crlf = self.rfile.read(2)
if crlf != CRLF:
raise ValueError(
"Bad chunked transfer coding (expected '\\r\\n', "
"got " + repr(crlf) + ")")
def read(self, size=None):
data = EMPTY
while True:
if size and len(data) >= size:
return data
if not self.buffer:
self._fetch()
if not self.buffer:
# EOF
return data
if size:
remaining = size - len(data)
data += self.buffer[:remaining]
self.buffer = self.buffer[remaining:]
else:
# Consume the whole buffer so the loop can fetch the next chunk.
data += self.buffer
self.buffer = EMPTY
def readline(self, size=None):
data = EMPTY
while True:
if size and len(data) >= size:
return data
if not self.buffer:
self._fetch()
if not self.buffer:
# EOF
return data
newline_pos = self.buffer.find(LF)
if size:
if newline_pos == -1:
remaining = size - len(data)
data += self.buffer[:remaining]
self.buffer = self.buffer[remaining:]
else:
# Consume through the LF, but never more than was requested;
# return early once a complete line has been read.
remaining = min(size - len(data), newline_pos + 1)
data += self.buffer[:remaining]
self.buffer = self.buffer[remaining:]
if data.endswith(LF):
return data
else:
if newline_pos == -1:
data += self.buffer
self.buffer = EMPTY
else:
# Include the LF and return the completed line.
data += self.buffer[:newline_pos + 1]
self.buffer = self.buffer[newline_pos + 1:]
return data
def readlines(self, sizehint=0):
# Shamelessly stolen from StringIO
total = 0
lines = []
line = self.readline(sizehint)
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline(sizehint)
return lines
def read_trailer_lines(self):
if not self.closed:
raise ValueError(
"Cannot read trailers until the request body has been read.")
while True:
line = self.rfile.readline()
if not line:
# No more data--illegal end of headers
raise ValueError("Illegal end of headers.")
self.bytes_read += len(line)
if self.maxlen and self.bytes_read > self.maxlen:
raise IOError("Request Entity Too Large")
if line == CRLF:
# Normal end of headers
break
if not line.endswith(CRLF):
raise ValueError("HTTP requires CRLF terminators")
yield line
def close(self):
self.rfile.close()
def __iter__(self):
# Yield lines until the stream is exhausted. (The StringIO-derived
# original referenced an undefined ``sizehint`` here.)
line = self.readline()
while line:
yield line
line = self.readline()
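# Illustrative sketch (comments only) of decoding a chunked body, with an
# in-memory stream standing in for the connection's rfile (maxlen=0 means
# "no limit" here):
#
#     import io
#     body = ChunkedRFile(io.BytesIO(b"5\r\nhello\r\n0\r\n\r\n"), maxlen=0)
#     body.read()                       # -> b'hello'
#     list(body.read_trailer_lines())   # -> []  (no trailers sent)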
class HTTPRequest(object):
"""An HTTP Request (and response).
A single HTTP connection may consist of multiple request/response pairs.
"""
server = None
"""The HTTPServer object which is receiving this request."""
conn = None
"""The HTTPConnection object on which this request connected."""
inheaders = {}
"""A dict of request headers."""
outheaders = []
"""A list of header tuples to write in the response."""
ready = False
"""When True, the request has been parsed and is ready to begin generating
the response. When False, signals the calling Connection that the response
should not be generated and the connection should close."""
close_connection = False
"""Signals the calling Connection that the request should close. This does
not imply an error! The client and/or server may each request that the
connection be closed."""
chunked_write = False
"""If True, output will be encoded with the "chunked" transfer-coding.
This value is set automatically inside send_headers."""
def __init__(self, server, conn):
self.server = server
self.conn = conn
self.ready = False
self.started_request = False
self.scheme = ntob("http")
if self.server.ssl_adapter is not None:
self.scheme = ntob("https")
# Use the lowest-common protocol in case read_request_line errors.
self.response_protocol = 'HTTP/1.0'
self.inheaders = {}
self.status = ""
self.outheaders = []
self.sent_headers = False
self.close_connection = self.__class__.close_connection
self.chunked_read = False
self.chunked_write = self.__class__.chunked_write
def parse_request(self):
"""Parse the next HTTP request start-line and message-headers."""
self.rfile = SizeCheckWrapper(self.conn.rfile,
self.server.max_request_header_size)
try:
success = self.read_request_line()
except MaxSizeExceeded:
self.simple_response(
"414 Request-URI Too Long",
"The Request-URI sent with the request exceeds the maximum "
"allowed bytes.")
return
else:
if not success:
return
try:
success = self.read_request_headers()
except MaxSizeExceeded:
self.simple_response(
"413 Request Entity Too Large",
"The headers sent with the request exceed the maximum "
"allowed bytes.")
return
else:
if not success:
return
self.ready = True
def read_request_line(self):
# HTTP/1.1 connections are persistent by default. If a client
# requests a page, then idles (leaves the connection open),
# then rfile.readline() will raise socket.error("timed out").
# Note that it does this based on the value given to settimeout(),
# and doesn't need the client to request or acknowledge the close
# (although your TCP stack might suffer for it: cf Apache's history
# with FIN_WAIT_2).
request_line = self.rfile.readline()
# Set started_request to True so communicate() knows to send 408
# from here on out.
self.started_request = True
if not request_line:
return False
if request_line == CRLF:
# RFC 2616 sec 4.1: "...if the server is reading the protocol
# stream at the beginning of a message and receives a CRLF
# first, it should ignore the CRLF."
# But only ignore one leading line! else we enable a DoS.
request_line = self.rfile.readline()
if not request_line:
return False
if not request_line.endswith(CRLF):
self.simple_response(
"400 Bad Request", "HTTP requires CRLF terminators")
return False
try:
method, uri, req_protocol = request_line.strip().split(SPACE, 2)
# The [x:y] slicing is necessary for byte strings to avoid getting
# ord's
rp = int(req_protocol[5:6]), int(req_protocol[7:8])
except ValueError:
self.simple_response("400 Bad Request", "Malformed Request-Line")
return False
self.uri = uri
self.method = method
# uri may be an abs_path (including "http://host.domain.tld");
scheme, authority, path = self.parse_request_uri(uri)
if path is None:
self.simple_response("400 Bad Request",
"Invalid path in Request-URI.")
return False
if NUMBER_SIGN in path:
self.simple_response("400 Bad Request",
"Illegal #fragment in Request-URI.")
return False
if scheme:
self.scheme = scheme
qs = EMPTY
if QUESTION_MARK in path:
path, qs = path.split(QUESTION_MARK, 1)
# Unquote the path+params (e.g. "/this%20path" -> "/this path").
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
#
# But note that "...a URI must be separated into its components
# before the escaped characters within those components can be
# safely decoded." http://www.ietf.org/rfc/rfc2396.txt, sec 2.4.2
# Therefore, "/this%2Fpath" becomes "/this%2Fpath", not "/this/path".
try:
atoms = [self.unquote_bytes(x) for x in quoted_slash.split(path)]
except ValueError:
ex = sys.exc_info()[1]
self.simple_response("400 Bad Request", ex.args[0])
return False
path = b"%2F".join(atoms)
self.path = path
# Note that, like wsgiref and most other HTTP servers,
# we "% HEX HEX"-unquote the path but not the query string.
self.qs = qs
# Compare request and server HTTP protocol versions, in case our
# server does not support the requested protocol. Limit our output
# to min(req, server). We want the following output:
# request server actual written supported response
# protocol protocol response protocol feature set
# a 1.0 1.0 1.0 1.0
# b 1.0 1.1 1.1 1.0
# c 1.1 1.0 1.0 1.0
# d 1.1 1.1 1.1 1.1
# Notice that, in (b), the response will be "HTTP/1.1" even though
# the client only understands 1.0. RFC 2616 10.5.6 says we should
# only return 505 if the _major_ version is different.
# The [x:y] slicing is necessary for byte strings to avoid getting
# ord's
sp = int(self.server.protocol[5:6]), int(self.server.protocol[7:8])
if sp[0] != rp[0]:
self.simple_response("505 HTTP Version Not Supported")
return False
self.request_protocol = req_protocol
self.response_protocol = "HTTP/%s.%s" % min(rp, sp)
return True
def read_request_headers(self):
"""Read self.rfile into self.inheaders. Return success."""
# then all the http headers
try:
read_headers(self.rfile, self.inheaders)
except ValueError:
ex = sys.exc_info()[1]
self.simple_response("400 Bad Request", ex.args[0])
return False
mrbs = self.server.max_request_body_size
if mrbs and int(self.inheaders.get(b"Content-Length", 0)) > mrbs:
self.simple_response(
"413 Request Entity Too Large",
"The entity sent with the request exceeds the maximum "
"allowed bytes.")
return False
# Persistent connection support
if self.response_protocol == "HTTP/1.1":
# Both server and client are HTTP/1.1
if self.inheaders.get(b"Connection", b"") == b"close":
self.close_connection = True
else:
# Either the server or client (or both) are HTTP/1.0
if self.inheaders.get(b"Connection", b"") != b"Keep-Alive":
self.close_connection = True
# Transfer-Encoding support
te = None
if self.response_protocol == "HTTP/1.1":
te = self.inheaders.get(b"Transfer-Encoding")
if te:
te = [x.strip().lower() for x in te.split(b",") if x.strip()]
self.chunked_read = False
if te:
for enc in te:
if enc == b"chunked":
self.chunked_read = True
else:
# Note that, even if we see "chunked", we must reject
# if there is an extension we don't recognize.
self.simple_response("501 Unimplemented")
self.close_connection = True
return False
# From PEP 333:
# "Servers and gateways that implement HTTP 1.1 must provide
# transparent support for HTTP 1.1's "expect/continue" mechanism.
# This may be done in any of several ways:
# 1. Respond to requests containing an Expect: 100-continue request
# with an immediate "100 Continue" response, and proceed normally.
# 2. Proceed with the request normally, but provide the application
# with a wsgi.input stream that will send the "100 Continue"
# response if/when the application first attempts to read from
# the input stream. The read request must then remain blocked
# until the client responds.
# 3. Wait until the client decides that the server does not support
# expect/continue, and sends the request body on its own.
# (This is suboptimal, and is not recommended.)
#
# We used to do 3, but are now doing 1. Maybe we'll do 2 someday,
# but it seems like it would be a big slowdown for such a rare case.
if self.inheaders.get(b"Expect", b"") == b"100-continue":
# Don't use simple_response here, because it emits headers
# we don't want. See
# https://github.com/cherrypy/cherrypy/issues/951
msg = self.server.protocol.encode(
'ascii') + b" 100 Continue\r\n\r\n"
try:
self.conn.wfile.write(msg)
except socket.error:
x = sys.exc_info()[1]
if x.args[0] not in socket_errors_to_ignore:
raise
return True
def parse_request_uri(self, uri):
"""Parse a Request-URI into (scheme, authority, path).
Note that Request-URI's must be one of::
Request-URI = "*" | absoluteURI | abs_path | authority
Therefore, a Request-URI which starts with a double forward-slash
cannot be a "net_path"::
net_path = "//" authority [ abs_path ]
Instead, it must be interpreted as an "abs_path" with an empty first
path segment::
abs_path = "/" path_segments
path_segments = segment *( "/" segment )
segment = *pchar *( ";" param )
param = *pchar
"""
if uri == ASTERISK:
return None, None, uri
scheme, authority, path, params, query, fragment = urlparse(uri)
if scheme and QUESTION_MARK not in scheme:
# An absoluteURI.
# If there's a scheme (and it must be http or https), then:
# http_URL = "http:" "//" host [ ":" port ] [ abs_path [ "?" query
# ]]
return scheme, authority, path
if uri.startswith(FORWARD_SLASH):
# An abs_path.
return None, None, uri
else:
# An authority.
return None, uri, None
def unquote_bytes(self, path):
"""takes quoted string and unquotes % encoded values"""
res = path.split(b'%')
for i in range(1, len(res)):
item = res[i]
try:
res[i] = bytes([int(item[:2], 16)]) + item[2:]
except ValueError:
raise
return b''.join(res)
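# Illustrative sketch (comments only) of the mapping performed above:
#
#     b"/a%20b"  ->  b"/a b"
#     b"/a%2Fb"  ->  b"/a/b"
#
# read_request_line splits the path on quoted_slash *before* calling this,
# so %2F-encoded slashes are preserved rather than becoming real separators.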
def respond(self):
"""Call the gateway and write its iterable output."""
mrbs = self.server.max_request_body_size
if self.chunked_read:
self.rfile = ChunkedRFile(self.conn.rfile, mrbs)
else:
cl = int(self.inheaders.get(b"Content-Length", 0))
if mrbs and mrbs < cl:
if not self.sent_headers:
self.simple_response(
"413 Request Entity Too Large",
"The entity sent with the request exceeds the "
"maximum allowed bytes.")
return
self.rfile = KnownLengthRFile(self.conn.rfile, cl)
self.server.gateway(self).respond()
if (self.ready and not self.sent_headers):
self.sent_headers = True
self.send_headers()
if self.chunked_write:
self.conn.wfile.write(b"0\r\n\r\n")
def simple_response(self, status, msg=""):
"""Write a simple response back to the client."""
status = str(status)
buf = [bytes(self.server.protocol, "ascii") + SPACE +
bytes(status, "ISO-8859-1") + CRLF,
bytes("Content-Length: %s\r\n" % len(msg), "ISO-8859-1"),
b"Content-Type: text/plain\r\n"]
if status[:3] in ("413", "414"):
# Request Entity Too Large / Request-URI Too Long
self.close_connection = True
if self.response_protocol == 'HTTP/1.1':
# This will not be true for 414, since read_request_line
# usually raises 414 before reading the whole line, and we
# therefore cannot know the proper response_protocol.
buf.append(b"Connection: close\r\n")
else:
# HTTP/1.0 had no 413/414 status nor Connection header.
# Emit 400 instead and trust the message body is enough.
status = "400 Bad Request"
buf.append(CRLF)
if msg:
if isinstance(msg, unicodestr):
msg = msg.encode("ISO-8859-1")
buf.append(msg)
try:
self.conn.wfile.write(b"".join(buf))
except socket.error:
x = sys.exc_info()[1]
if x.args[0] not in socket_errors_to_ignore:
raise
def write(self, chunk):
"""Write unbuffered data to the client."""
if self.chunked_write and chunk:
buf = [bytes(hex(len(chunk)), 'ASCII')[2:], CRLF, chunk, CRLF]
self.conn.wfile.write(EMPTY.join(buf))
else:
self.conn.wfile.write(chunk)
def send_headers(self):
"""Assert, process, and send the HTTP response message-headers.
You must set self.status, and self.outheaders before calling this.
"""
hkeys = [key.lower() for key, value in self.outheaders]
status = int(self.status[:3])
if status == 413:
# Request Entity Too Large. Close conn to avoid garbage.
self.close_connection = True
elif b"content-length" not in hkeys:
# "All 1xx (informational), 204 (no content),
# and 304 (not modified) responses MUST NOT
# include a message-body." So no point chunking.
if status < 200 or status in (204, 205, 304):
pass
else:
if (self.response_protocol == 'HTTP/1.1'
and self.method != b'HEAD'):
# Use the chunked transfer-coding
self.chunked_write = True
self.outheaders.append((b"Transfer-Encoding", b"chunked"))
else:
# Closing the conn is the only way to determine len.
self.close_connection = True
if b"connection" not in hkeys:
if self.response_protocol == 'HTTP/1.1':
# Both server and client are HTTP/1.1 or better
if self.close_connection:
self.outheaders.append((b"Connection", b"close"))
else:
# Server and/or client are HTTP/1.0
if not self.close_connection:
self.outheaders.append((b"Connection", b"Keep-Alive"))
if (not self.close_connection) and (not self.chunked_read):
# Read any remaining request body data on the socket.
# "If an origin server receives a request that does not include an
# Expect request-header field with the "100-continue" expectation,
# the request includes a request body, and the server responds
# with a final status code before reading the entire request body
# from the transport connection, then the server SHOULD NOT close
# the transport connection until it has read the entire request,
# or until the client closes the connection. Otherwise, the client
# might not reliably receive the response message. However, this
# requirement is not to be construed as preventing a server from
# defending itself against denial-of-service attacks, or from
# badly broken client implementations."
remaining = getattr(self.rfile, 'remaining', 0)
if remaining > 0:
self.rfile.read(remaining)
if b"date" not in hkeys:
self.outheaders.append((
b"Date",
email.utils.formatdate(usegmt=True).encode('ISO-8859-1')
))
if b"server" not in hkeys:
self.outheaders.append(
(b"Server", self.server.server_name.encode('ISO-8859-1')))
buf = [self.server.protocol.encode(
'ascii') + SPACE + self.status + CRLF]
for k, v in self.outheaders:
buf.append(k + COLON + SPACE + v + CRLF)
buf.append(CRLF)
self.conn.wfile.write(EMPTY.join(buf))
class NoSSLError(Exception):
"""Exception raised when a client speaks HTTP to an HTTPS socket."""
pass
class FatalSSLAlert(Exception):
"""Exception raised when the SSL implementation signals a fatal alert."""
pass
class CP_BufferedWriter(io.BufferedWriter):
"""Faux file object attached to a socket object."""
def write(self, b):
self._checkClosed()
if isinstance(b, str):
raise TypeError("can't write str to binary stream")
with self._write_lock:
self._write_buf.extend(b)
self._flush_unlocked()
return len(b)
def _flush_unlocked(self):
self._checkClosed("flush of closed file")
while self._write_buf:
try:
# ssl sockets only accept 'bytes', not bytearrays,
# so perhaps we should conditionally wrap this for perf?
n = self.raw.write(bytes(self._write_buf))
except io.BlockingIOError as e:
n = e.characters_written
del self._write_buf[:n]
def CP_makefile(sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE):
if 'r' in mode:
return io.BufferedReader(socket.SocketIO(sock, mode), bufsize)
else:
return CP_BufferedWriter(socket.SocketIO(sock, mode), bufsize)
class HTTPConnection(object):
"""An HTTP connection (active socket).
server: the Server object which received this connection.
socket: the raw socket object (usually TCP) for this connection.
makefile: a fileobject class for reading from the socket.
"""
remote_addr = None
remote_port = None
ssl_env = None
rbufsize = DEFAULT_BUFFER_SIZE
wbufsize = DEFAULT_BUFFER_SIZE
RequestHandlerClass = HTTPRequest
def __init__(self, server, sock, makefile=CP_makefile):
self.server = server
self.socket = sock
self.rfile = makefile(sock, "rb", self.rbufsize)
self.wfile = makefile(sock, "wb", self.wbufsize)
self.requests_seen = 0
def communicate(self):
"""Read each request and respond appropriately."""
request_seen = False
try:
while True:
# (re)set req to None so that if something goes wrong in
# the RequestHandlerClass constructor, the error doesn't
# get written to the previous request.
req = None
req = self.RequestHandlerClass(self.server, self)
# This order of operations should guarantee correct pipelining.
req.parse_request()
if self.server.stats['Enabled']:
self.requests_seen += 1
if not req.ready:
# Something went wrong in the parsing (and the server has
# probably already made a simple_response). Return and
# let the conn close.
return
request_seen = True
req.respond()
if req.close_connection:
return
except socket.error:
e = sys.exc_info()[1]
errnum = e.args[0]
# sadly SSL sockets return a different (longer) time out string
if (
errnum == 'timed out' or
errnum == 'The read operation timed out'
):
# Don't error if we're between requests; only error
# if 1) no request has been started at all, or 2) we're
# in the middle of a request.
# See https://github.com/cherrypy/cherrypy/issues/853
if (not request_seen) or (req and req.started_request):
# Don't bother writing the 408 if the response
# has already started being written.
if req and not req.sent_headers:
try:
req.simple_response("408 Request Timeout")
except FatalSSLAlert:
# Close the connection.
return
elif errnum not in socket_errors_to_ignore:
self.server.error_log("socket.error %s" % repr(errnum),
level=logging.WARNING, traceback=True)
if req and not req.sent_headers:
try:
req.simple_response("500 Internal Server Error")
except FatalSSLAlert:
# Close the connection.
return
return
except (KeyboardInterrupt, SystemExit):
raise
except FatalSSLAlert:
# Close the connection.
return
except NoSSLError:
if req and not req.sent_headers:
# Unwrap our wfile
self.wfile = CP_makefile(
self.socket._sock, "wb", self.wbufsize)
req.simple_response(
"400 Bad Request",
"The client sent a plain HTTP request, but this server "
"only speaks HTTPS on this port.")
self.linger = True
except Exception:
e = sys.exc_info()[1]
self.server.error_log(repr(e), level=logging.ERROR, traceback=True)
if req and not req.sent_headers:
try:
req.simple_response("500 Internal Server Error")
except FatalSSLAlert:
# Close the connection.
return
linger = False
def close(self):
"""Close the socket underlying this connection."""
self.rfile.close()
if not self.linger:
# Python's socket module does NOT call close on the kernel
# socket when you call socket.close(). We do so manually here
# because we want this server to send a FIN TCP segment
# immediately. Note this must be called *before* calling
# socket.close(), because the latter drops its reference to
# the kernel socket.
# Python 3 *probably* fixed this with socket._real_close;
# hard to tell.
# self.socket._sock.close()
self.socket.close()
else:
# On the other hand, sometimes we want to hang around for a bit
# to make sure the client has a chance to read our entire
# response. Skipping the close() calls here delays the FIN
# packet until the socket object is garbage-collected later.
# Someday, perhaps, we'll do the full lingering_close that
# Apache does, but not today.
pass
class TrueyZero(object):
"""An object which equals and does math like the integer 0 but evals True.
"""
def __add__(self, other):
return other
def __radd__(self, other):
return other
trueyzero = TrueyZero()
_SHUTDOWNREQUEST = None
class WorkerThread(threading.Thread):
"""Thread which continuously polls a Queue for Connection objects.
Due to the timing issues of polling a Queue, a WorkerThread does not
check its own 'ready' flag after it has started. To stop the thread,
it is necessary to stick a _SHUTDOWNREQUEST object onto the Queue
(one for each running WorkerThread).
"""
conn = None
"""The current connection pulled off the Queue, or None."""
server = None
"""The HTTP Server which spawned this thread, and which owns the
Queue and is placing active connections into it."""
ready = False
"""A simple flag for the calling server to know when this thread
has begun polling the Queue."""
def __init__(self, server):
self.ready = False
self.server = server
self.requests_seen = 0
self.bytes_read = 0
self.bytes_written = 0
self.start_time = None
self.work_time = 0
self.stats = {
'Requests': lambda s: self.requests_seen + (
(self.start_time is None) and
trueyzero or
self.conn.requests_seen
),
'Bytes Read': lambda s: self.bytes_read + (
(self.start_time is None) and
trueyzero or
self.conn.rfile.bytes_read
),
'Bytes Written': lambda s: self.bytes_written + (
(self.start_time is None) and
trueyzero or
self.conn.wfile.bytes_written
),
'Work Time': lambda s: self.work_time + (
(self.start_time is None) and
trueyzero or
time.time() - self.start_time
),
'Read Throughput': lambda s: s['Bytes Read'](s) / (
s['Work Time'](s) or 1e-6),
'Write Throughput': lambda s: s['Bytes Written'](s) / (
s['Work Time'](s) or 1e-6),
}
threading.Thread.__init__(self)
def run(self):
self.server.stats['Worker Threads'][self.getName()] = self.stats
try:
self.ready = True
while True:
conn = self.server.requests.get()
if conn is _SHUTDOWNREQUEST:
return
self.conn = conn
if self.server.stats['Enabled']:
self.start_time = time.time()
try:
conn.communicate()
finally:
conn.close()
if self.server.stats['Enabled']:
self.requests_seen += self.conn.requests_seen
self.bytes_read += self.conn.rfile.bytes_read
self.bytes_written += self.conn.wfile.bytes_written
self.work_time += time.time() - self.start_time
self.start_time = None
self.conn = None
except (KeyboardInterrupt, SystemExit):
exc = sys.exc_info()[1]
self.server.interrupt = exc
class ThreadPool(object):
"""A Request Queue for an HTTPServer which pools threads.
ThreadPool objects must provide min, get(), put(obj), start()
and stop(timeout) attributes.
"""
def __init__(self, server, min=10, max=-1,
accepted_queue_size=-1, accepted_queue_timeout=10):
self.server = server
self.min = min
self.max = max
self._threads = []
self._queue = queue.Queue(maxsize=accepted_queue_size)
self._queue_put_timeout = accepted_queue_timeout
self.get = self._queue.get
def start(self):
"""Start the pool of threads."""
for i in range(self.min):
self._threads.append(WorkerThread(self.server))
for worker in self._threads:
worker.setName("CP Server " + worker.getName())
worker.start()
for worker in self._threads:
while not worker.ready:
time.sleep(.1)
def _get_idle(self):
"""Number of worker threads which are idle. Read-only."""
return len([t for t in self._threads if t.conn is None])
idle = property(_get_idle, doc=_get_idle.__doc__)
def put(self, obj):
self._queue.put(obj, block=True, timeout=self._queue_put_timeout)
if obj is _SHUTDOWNREQUEST:
return
def grow(self, amount):
"""Spawn new worker threads (not above self.max)."""
if self.max > 0:
budget = max(self.max - len(self._threads), 0)
else:
# self.max <= 0 indicates no maximum
budget = float('inf')
n_new = min(amount, budget)
workers = [self._spawn_worker() for i in range(n_new)]
while not all(worker.ready for worker in workers):
time.sleep(.1)
self._threads.extend(workers)
def _spawn_worker(self):
worker = WorkerThread(self.server)
worker.setName("CP Server " + worker.getName())
worker.start()
return worker
def shrink(self, amount):
"""Kill off worker threads (not below self.min)."""
# Grow/shrink the pool if necessary.
# Remove any dead threads from our list
for t in self._threads:
if not t.isAlive():
self._threads.remove(t)
amount -= 1
# calculate the number of threads above the minimum
n_extra = max(len(self._threads) - self.min, 0)
# don't remove more than amount
n_to_remove = min(amount, n_extra)
# put shutdown requests on the queue equal to the number of threads
# to remove. As each request is processed by a worker, that worker
# will terminate and be culled from the list.
for n in range(n_to_remove):
self._queue.put(_SHUTDOWNREQUEST)
def stop(self, timeout=5):
# Must shut down threads here so the code that calls
# this method can know when all threads are stopped.
for worker in self._threads:
self._queue.put(_SHUTDOWNREQUEST)
# Don't join currentThread (when stop is called inside a request).
current = threading.currentThread()
if timeout and timeout >= 0:
endtime = time.time() + timeout
while self._threads:
worker = self._threads.pop()
if worker is not current and worker.isAlive():
try:
if timeout is None or timeout < 0:
worker.join()
else:
remaining_time = endtime - time.time()
if remaining_time > 0:
worker.join(remaining_time)
if worker.isAlive():
# We exhausted the timeout.
# Forcibly shut down the socket.
c = worker.conn
if c and not c.rfile.closed:
try:
c.socket.shutdown(socket.SHUT_RD)
except TypeError:
# pyOpenSSL sockets don't take an arg
c.socket.shutdown()
worker.join()
except (AssertionError,
# Ignore repeated Ctrl-C.
# See
# https://github.com/cherrypy/cherrypy/issues/691.
KeyboardInterrupt):
pass
def _get_qsize(self):
return self._queue.qsize()
qsize = property(_get_qsize)
try:
import fcntl
except ImportError:
try:
from ctypes import windll, WinError
import ctypes.wintypes
_SetHandleInformation = windll.kernel32.SetHandleInformation
_SetHandleInformation.argtypes = [
ctypes.wintypes.HANDLE,
ctypes.wintypes.DWORD,
ctypes.wintypes.DWORD,
]
_SetHandleInformation.restype = ctypes.wintypes.BOOL
except ImportError:
def prevent_socket_inheritance(sock):
"""Dummy function, since neither fcntl nor ctypes are available."""
pass
else:
def prevent_socket_inheritance(sock):
"""Mark the given socket fd as non-inheritable (Windows)."""
if not _SetHandleInformation(sock.fileno(), 1, 0):
raise WinError()
else:
def prevent_socket_inheritance(sock):
"""Mark the given socket fd as non-inheritable (POSIX)."""
fd = sock.fileno()
old_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
fcntl.fcntl(fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC)
class SSLAdapter(object):
"""Base class for SSL driver library adapters.
Required methods:
* ``wrap(sock) -> (wrapped socket, ssl environ dict)``
* ``makefile(sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE) ->
socket file object``
"""
def __init__(self, certificate, private_key, certificate_chain=None):
self.certificate = certificate
self.private_key = private_key
self.certificate_chain = certificate_chain
def wrap(self, sock):
raise NotImplementedError
def makefile(self, sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE):
raise NotImplementedError
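# A minimal subclass sketch (comments only; the class name below is
# illustrative, not shipped code). Note that HTTPServer.bind() also calls
# ``ssl_adapter.bind(sock)``, so concrete adapters provide that as well,
# as BuiltinSSLAdapter does:
#
#     class NullSSLAdapter(SSLAdapter):
#         """Pass-through adapter that performs no TLS at all."""
#         def bind(self, sock):
#             return sock
#         def wrap(self, sock):
#             return sock, {}
#         def makefile(self, sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE):
#             return CP_makefile(sock, mode, bufsize)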
class HTTPServer(object):
"""An HTTP server."""
_bind_addr = "127.0.0.1"
_interrupt = None
gateway = None
"""A Gateway instance."""
minthreads = None
"""The minimum number of worker threads to create (default 10)."""
maxthreads = None
"""The maximum number of worker threads to create (default -1 = no limit).
"""
server_name = None
"""The name of the server; defaults to socket.gethostname()."""
protocol = "HTTP/1.1"
"""The version string to write in the Status-Line of all HTTP responses.
For example, "HTTP/1.1" is the default. This also limits the supported
features used in the response."""
request_queue_size = 5
"""The 'backlog' arg to socket.listen(); max queued connections
(default 5).
"""
shutdown_timeout = 5
"""The total time, in seconds, to wait for worker threads to cleanly exit.
"""
timeout = 10
"""The timeout in seconds for accepted connections (default 10)."""
version = "CherryPy/" + cp_version
"""A version string for the HTTPServer."""
software = None
"""The value to set for the SERVER_SOFTWARE entry in the WSGI environ.
If None, this defaults to ``'%s Server' % self.version``."""
ready = False
"""An internal flag which marks whether the socket is accepting
connections.
"""
max_request_header_size = 0
"""The maximum size, in bytes, for request headers, or 0 for no limit."""
max_request_body_size = 0
"""The maximum size, in bytes, for request bodies, or 0 for no limit."""
nodelay = True
"""If True (the default since 3.1), sets the TCP_NODELAY socket option."""
ConnectionClass = HTTPConnection
"""The class to use for handling HTTP connections."""
ssl_adapter = None
"""An instance of SSLAdapter (or a subclass).
You must have the corresponding SSL driver library installed."""
def __init__(self, bind_addr, gateway, minthreads=10, maxthreads=-1,
server_name=None):
self.bind_addr = bind_addr
self.gateway = gateway
self.requests = ThreadPool(self, min=minthreads or 1, max=maxthreads)
if not server_name:
server_name = socket.gethostname()
self.server_name = server_name
self.clear_stats()
def clear_stats(self):
self._start_time = None
self._run_time = 0
self.stats = {
'Enabled': False,
'Bind Address': lambda s: repr(self.bind_addr),
'Run time': lambda s: (not s['Enabled']) and -1 or self.runtime(),
'Accepts': 0,
'Accepts/sec': lambda s: s['Accepts'] / self.runtime(),
'Queue': lambda s: getattr(self.requests, "qsize", None),
'Threads': lambda s: len(getattr(self.requests, "_threads", [])),
'Threads Idle': lambda s: getattr(self.requests, "idle", None),
'Socket Errors': 0,
'Requests': lambda s: (not s['Enabled']) and -1 or sum(
[w['Requests'](w) for w in s['Worker Threads'].values()], 0),
'Bytes Read': lambda s: (not s['Enabled']) and -1 or sum(
[w['Bytes Read'](w) for w in s['Worker Threads'].values()], 0),
'Bytes Written': lambda s: (not s['Enabled']) and -1 or sum(
[w['Bytes Written'](w) for w in s['Worker Threads'].values()],
0),
'Work Time': lambda s: (not s['Enabled']) and -1 or sum(
[w['Work Time'](w) for w in s['Worker Threads'].values()], 0),
'Read Throughput': lambda s: (not s['Enabled']) and -1 or sum(
[w['Bytes Read'](w) / (w['Work Time'](w) or 1e-6)
for w in s['Worker Threads'].values()], 0),
'Write Throughput': lambda s: (not s['Enabled']) and -1 or sum(
[w['Bytes Written'](w) / (w['Work Time'](w) or 1e-6)
for w in s['Worker Threads'].values()], 0),
'Worker Threads': {},
}
logging.statistics["CherryPy HTTPServer %d" % id(self)] = self.stats
def runtime(self):
if self._start_time is None:
return self._run_time
else:
return self._run_time + (time.time() - self._start_time)
def __str__(self):
return "%s.%s(%r)" % (self.__module__, self.__class__.__name__,
self.bind_addr)
def _get_bind_addr(self):
return self._bind_addr
def _set_bind_addr(self, value):
if isinstance(value, tuple) and value[0] in ('', None):
# Despite the socket module docs, using '' does not
# allow AI_PASSIVE to work. Passing None instead
# returns '0.0.0.0' like we want. In other words:
# host AI_PASSIVE result
# '' Y 192.168.x.y
# '' N 192.168.x.y
# None Y 0.0.0.0
# None N 127.0.0.1
# But since you can get the same effect with an explicit
# '0.0.0.0', we deny both the empty string and None as values.
raise ValueError("Host values of '' or None are not allowed. "
"Use '0.0.0.0' (IPv4) or '::' (IPv6) instead "
"to listen on all active interfaces.")
self._bind_addr = value
bind_addr = property(
_get_bind_addr,
_set_bind_addr,
doc="""The interface on which to listen for connections.
For TCP sockets, a (host, port) tuple. Host values may be any IPv4
or IPv6 address, or any valid hostname. The string 'localhost' is a
synonym for '127.0.0.1' (or '::1', if your hosts file prefers IPv6).
The string '0.0.0.0' is a special IPv4 entry meaning "any active
interface" (INADDR_ANY), and '::' is the similar IN6ADDR_ANY for
IPv6. The empty string or None are not allowed.
For UNIX sockets, supply the filename as a string.""")
def start(self):
"""Run the server forever."""
# We don't have to trap KeyboardInterrupt or SystemExit here,
# because cherrypy.server already does so, calling self.stop() for us.
# If you're using this server with another framework, you should
# trap those exceptions in whatever code block calls start().
self._interrupt = None
if self.software is None:
self.software = "%s Server" % self.version
# Select the appropriate socket
if isinstance(self.bind_addr, basestring):
# AF_UNIX socket
# So we can reuse the socket...
try:
os.unlink(self.bind_addr)
except OSError:
pass
# So everyone can access the socket...
try:
os.chmod(self.bind_addr, 0o777)
except OSError:
pass
info = [
(socket.AF_UNIX, socket.SOCK_STREAM, 0, "", self.bind_addr)]
else:
# AF_INET or AF_INET6 socket
# Get the correct address family for our host (allows IPv6
# addresses)
host, port = self.bind_addr
try:
info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
except socket.gaierror:
if ':' in self.bind_addr[0]:
info = [(socket.AF_INET6, socket.SOCK_STREAM,
0, "", self.bind_addr + (0, 0))]
else:
info = [(socket.AF_INET, socket.SOCK_STREAM,
0, "", self.bind_addr)]
self.socket = None
msg = "No socket could be created"
for res in info:
af, socktype, proto, canonname, sa = res
try:
self.bind(af, socktype, proto)
except socket.error as serr:
msg = "%s -- (%s: %s)" % (msg, sa, serr)
if self.socket:
self.socket.close()
self.socket = None
continue
break
if not self.socket:
raise socket.error(msg)
# Timeout so KeyboardInterrupt can be caught on Win32
self.socket.settimeout(1)
self.socket.listen(self.request_queue_size)
# Create worker threads
self.requests.start()
self.ready = True
self._start_time = time.time()
while self.ready:
try:
self.tick()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.error_log("Error in HTTPServer.tick", level=logging.ERROR,
traceback=True)
if self.interrupt:
while self.interrupt is True:
# Wait for self.stop() to complete. See _set_interrupt.
time.sleep(0.1)
if self.interrupt:
raise self.interrupt
def error_log(self, msg="", level=20, traceback=False):
# Override this in subclasses as desired
sys.stderr.write(msg + '\n')
sys.stderr.flush()
if traceback:
tblines = traceback_.format_exc()
sys.stderr.write(tblines)
sys.stderr.flush()
def bind(self, family, type, proto=0):
"""Create (or recreate) the actual socket object."""
self.socket = socket.socket(family, type, proto)
prevent_socket_inheritance(self.socket)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if self.nodelay and not isinstance(self.bind_addr, str):
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if self.ssl_adapter is not None:
self.socket = self.ssl_adapter.bind(self.socket)
# If listening on the IPV6 any address ('::' = IN6ADDR_ANY),
# activate dual-stack. See
# https://github.com/cherrypy/cherrypy/issues/871.
if (hasattr(socket, 'AF_INET6') and family == socket.AF_INET6
and self.bind_addr[0] in ('::', '::0', '::0.0.0.0')):
try:
self.socket.setsockopt(
socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
except (AttributeError, socket.error):
# Apparently, the socket option is not available in
# this machine's TCP stack
pass
self.socket.bind(self.bind_addr)
def tick(self):
"""Accept a new connection and put it on the Queue."""
try:
s, addr = self.socket.accept()
if self.stats['Enabled']:
self.stats['Accepts'] += 1
if not self.ready:
return
prevent_socket_inheritance(s)
if hasattr(s, 'settimeout'):
s.settimeout(self.timeout)
makefile = CP_makefile
ssl_env = {}
# if ssl cert and key are set, we try to be a secure HTTP server
if self.ssl_adapter is not None:
try:
s, ssl_env = self.ssl_adapter.wrap(s)
except NoSSLError:
msg = ("The client sent a plain HTTP request, but "
"this server only speaks HTTPS on this port.")
buf = ["%s 400 Bad Request\r\n" % self.protocol,
"Content-Length: %s\r\n" % len(msg),
"Content-Type: text/plain\r\n\r\n",
msg]
wfile = makefile(s, "wb", DEFAULT_BUFFER_SIZE)
try:
wfile.write("".join(buf).encode('ISO-8859-1'))
except socket.error:
x = sys.exc_info()[1]
if x.args[0] not in socket_errors_to_ignore:
raise
return
if not s:
return
makefile = self.ssl_adapter.makefile
# Re-apply our timeout since we may have a new socket object
if hasattr(s, 'settimeout'):
s.settimeout(self.timeout)
conn = self.ConnectionClass(self, s, makefile)
if not isinstance(self.bind_addr, basestring):
# optional values
# Until we do DNS lookups, omit REMOTE_HOST
if addr is None: # sometimes this can happen
# figure out if AF_INET or AF_INET6.
if len(s.getsockname()) == 2:
# AF_INET
addr = ('0.0.0.0', 0)
else:
# AF_INET6
addr = ('::', 0)
conn.remote_addr = addr[0]
conn.remote_port = addr[1]
conn.ssl_env = ssl_env
try:
self.requests.put(conn)
except queue.Full:
# Just drop the conn. TODO: write 503 back?
conn.close()
return
except socket.timeout:
# The only reason for the timeout in start() is so we can
# notice keyboard interrupts on Win32, which don't interrupt
# accept() by default
return
except socket.error:
x = sys.exc_info()[1]
if self.stats['Enabled']:
self.stats['Socket Errors'] += 1
if x.args[0] in socket_error_eintr:
# I *think* this is right. EINTR should occur when a signal
# is received during the accept() call; all docs say retry
# the call, and I *think* I'm reading it right that Python
# will then go ahead and poll for and handle the signal
# elsewhere. See
# https://github.com/cherrypy/cherrypy/issues/707.
return
if x.args[0] in socket_errors_nonblocking:
# Just try again. See
# https://github.com/cherrypy/cherrypy/issues/479.
return
if x.args[0] in socket_errors_to_ignore:
# Our socket was closed.
# See https://github.com/cherrypy/cherrypy/issues/686.
return
raise
def _get_interrupt(self):
return self._interrupt
def _set_interrupt(self, interrupt):
self._interrupt = True
self.stop()
self._interrupt = interrupt
interrupt = property(_get_interrupt, _set_interrupt,
doc="Set this to an Exception instance to "
"interrupt the server.")
def stop(self):
"""Gracefully shutdown a server that is serving forever."""
self.ready = False
if self._start_time is not None:
self._run_time += (time.time() - self._start_time)
self._start_time = None
sock = getattr(self, "socket", None)
if sock:
if not isinstance(self.bind_addr, basestring):
# Touch our own socket to make accept() return immediately.
try:
host, port = sock.getsockname()[:2]
except socket.error:
x = sys.exc_info()[1]
if x.args[0] not in socket_errors_to_ignore:
# Changed to use error code and not message
# See
# https://github.com/cherrypy/cherrypy/issues/860.
raise
else:
# Note that we're explicitly NOT using AI_PASSIVE,
# here, because we want an actual IP to touch.
# localhost won't work if we've bound to a public IP,
# but it will if we bound to '0.0.0.0' (INADDR_ANY).
for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
s = None
try:
s = socket.socket(af, socktype, proto)
# See
# http://groups.google.com/group/cherrypy-users/
# browse_frm/thread/bbfe5eb39c904fe0
s.settimeout(1.0)
s.connect((host, port))
s.close()
except socket.error:
if s:
s.close()
if hasattr(sock, "close"):
sock.close()
self.socket = None
self.requests.stop(self.shutdown_timeout)
class Gateway(object):
"""A base class to interface HTTPServer with other systems, such as WSGI.
"""
def __init__(self, req):
self.req = req
def respond(self):
"""Process the current request. Must be overridden in a subclass."""
raise NotImplementedError
# These may either be wsgiserver.SSLAdapter subclasses or the string names
# of such classes (in which case they will be lazily loaded).
ssl_adapters = {
'builtin': 'cherrypy.wsgiserver.ssl_builtin.BuiltinSSLAdapter',
'pyopenssl': 'cherrypy.wsgiserver.ssl_pyopenssl.pyOpenSSLAdapter',
}
def get_ssl_adapter_class(name='builtin'):
"""Return an SSL adapter class for the given name."""
adapter = ssl_adapters[name.lower()]
if isinstance(adapter, basestring):
last_dot = adapter.rfind(".")
attr_name = adapter[last_dot + 1:]
mod_path = adapter[:last_dot]
try:
mod = sys.modules[mod_path]
if mod is None:
raise KeyError()
except KeyError:
# The last [''] is important.
mod = __import__(mod_path, globals(), locals(), [''])
# Let an AttributeError propagate outward.
try:
adapter = getattr(mod, attr_name)
except AttributeError:
raise AttributeError("'%s' object has no attribute '%s'"
% (mod_path, attr_name))
return adapter
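# Illustrative sketch (comments only), assuming this module is importable as
# ``cherrypy.wsgiserver`` so the dotted paths above resolve; otherwise pass
# an SSLAdapter subclass around directly instead of a name:
#
#     cls = get_ssl_adapter_class('builtin')
#     server.ssl_adapter = cls('server.crt', 'server.key')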
# ------------------------------- WSGI Stuff -------------------------------- #
class CherryPyWSGIServer(HTTPServer):
"""A subclass of HTTPServer which calls a WSGI application."""
wsgi_version = (1, 0)
"""The version of WSGI to produce."""
def __init__(self, bind_addr, wsgi_app, numthreads=10, server_name=None,
max=-1, request_queue_size=5, timeout=10, shutdown_timeout=5,
accepted_queue_size=-1, accepted_queue_timeout=10):
self.requests = ThreadPool(self, min=numthreads or 1, max=max,
accepted_queue_size=accepted_queue_size,
accepted_queue_timeout=accepted_queue_timeout)
self.wsgi_app = wsgi_app
self.gateway = wsgi_gateways[self.wsgi_version]
self.bind_addr = bind_addr
if not server_name:
server_name = socket.gethostname()
self.server_name = server_name
self.request_queue_size = request_queue_size
self.timeout = timeout
self.shutdown_timeout = shutdown_timeout
self.clear_stats()
def _get_numthreads(self):
return self.requests.min
def _set_numthreads(self, value):
self.requests.min = value
numthreads = property(_get_numthreads, _set_numthreads)
class WSGIGateway(Gateway):
"""A base class to interface HTTPServer with WSGI."""
def __init__(self, req):
self.req = req
self.started_response = False
self.env = self.get_environ()
self.remaining_bytes_out = None
def get_environ(self):
"""Return a new environ dict targeting the given wsgi.version"""
raise NotImplementedError
def respond(self):
"""Process the current request."""
response = self.req.server.wsgi_app(self.env, self.start_response)
try:
for chunk in response:
# "The start_response callable must not actually transmit
# the response headers. Instead, it must store them for the
# server or gateway to transmit only after the first
# iteration of the application return value that yields
# a NON-EMPTY string, or upon the application's first
# invocation of the write() callable." (PEP 333)
if chunk:
if isinstance(chunk, unicodestr):
chunk = chunk.encode('ISO-8859-1')
self.write(chunk)
finally:
if hasattr(response, "close"):
response.close()
def start_response(self, status, headers, exc_info=None):
"""WSGI callable to begin the HTTP response."""
# "The application may call start_response more than once,
# if and only if the exc_info argument is provided."
if self.started_response and not exc_info:
raise AssertionError("WSGI start_response called a second "
"time with no exc_info.")
self.started_response = True
# "if exc_info is provided, and the HTTP headers have already been
# sent, start_response must raise an error, and should raise the
# exc_info tuple."
if self.req.sent_headers:
try:
raise exc_info[0](exc_info[1]).with_traceback(exc_info[2])
finally:
exc_info = None
# According to PEP 3333, when using Python 3, the response status
# and headers must be bytes masquerading as unicode; that is, they
# must be of type "str" but are restricted to code points in the
# "latin-1" set.
if not isinstance(status, str):
raise TypeError("WSGI response status is not of type str.")
self.req.status = status.encode('ISO-8859-1')
for k, v in headers:
if not isinstance(k, str):
raise TypeError(
"WSGI response header key %r is not of type str." % k)
if not isinstance(v, str):
raise TypeError(
"WSGI response header value %r is not of type str." % v)
if k.lower() == 'content-length':
self.remaining_bytes_out = int(v)
self.req.outheaders.append(
(k.encode('ISO-8859-1'), v.encode('ISO-8859-1')))
return self.write
def write(self, chunk):
"""WSGI callable to write unbuffered data to the client.
This method is also used internally by start_response (to write
data from the iterable returned by the WSGI application).
"""
if not self.started_response:
raise AssertionError("WSGI write called before start_response.")
chunklen = len(chunk)
rbo = self.remaining_bytes_out
if rbo is not None and chunklen > rbo:
if not self.req.sent_headers:
# Whew. We can send a 500 to the client.
self.req.simple_response("500 Internal Server Error",
"The requested resource returned "
"more bytes than the declared "
"Content-Length.")
else:
# Dang. We have probably already sent data. Truncate the chunk
# to fit (so the client doesn't hang) and raise an error later.
chunk = chunk[:rbo]
if not self.req.sent_headers:
self.req.sent_headers = True
self.req.send_headers()
self.req.write(chunk)
if rbo is not None:
rbo -= chunklen
if rbo < 0:
raise ValueError(
"Response body exceeds the declared Content-Length.")
class WSGIGateway_10(WSGIGateway):
"""A Gateway class to interface HTTPServer with WSGI 1.0.x."""
def get_environ(self):
"""Return a new environ dict targeting the given wsgi.version"""
req = self.req
env = {
# set a non-standard environ entry so the WSGI app can know what
# the *real* server protocol is (and what features to support).
# See http://www.faqs.org/rfcs/rfc2145.html.
'ACTUAL_SERVER_PROTOCOL': req.server.protocol,
'PATH_INFO': req.path.decode('ISO-8859-1'),
'QUERY_STRING': req.qs.decode('ISO-8859-1'),
'REMOTE_ADDR': req.conn.remote_addr or '',
'REMOTE_PORT': str(req.conn.remote_port or ''),
'REQUEST_METHOD': req.method.decode('ISO-8859-1'),
'REQUEST_URI': req.uri.decode('ISO-8859-1'),
'SCRIPT_NAME': '',
'SERVER_NAME': req.server.server_name,
# Bah. "SERVER_PROTOCOL" is actually the REQUEST protocol.
'SERVER_PROTOCOL': req.request_protocol.decode('ISO-8859-1'),
'SERVER_SOFTWARE': req.server.software,
'wsgi.errors': sys.stderr,
'wsgi.input': req.rfile,
'wsgi.multiprocess': False,
'wsgi.multithread': True,
'wsgi.run_once': False,
'wsgi.url_scheme': req.scheme.decode('ISO-8859-1'),
'wsgi.version': (1, 0),
}
if isinstance(req.server.bind_addr, basestring):
# AF_UNIX. This isn't really allowed by WSGI, which doesn't
# address unix domain sockets. But it's better than nothing.
env["SERVER_PORT"] = ""
else:
env["SERVER_PORT"] = str(req.server.bind_addr[1])
# Request headers
for k, v in req.inheaders.items():
k = k.decode('ISO-8859-1').upper().replace("-", "_")
env["HTTP_" + k] = v.decode('ISO-8859-1')
# CONTENT_TYPE/CONTENT_LENGTH
ct = env.pop("HTTP_CONTENT_TYPE", None)
if ct is not None:
env["CONTENT_TYPE"] = ct
cl = env.pop("HTTP_CONTENT_LENGTH", None)
if cl is not None:
env["CONTENT_LENGTH"] = cl
if req.conn.ssl_env:
env.update(req.conn.ssl_env)
return env
class WSGIGateway_u0(WSGIGateway_10):
"""A Gateway class to interface HTTPServer with WSGI u.0.
WSGI u.0 is an experimental protocol, which uses unicode for keys
and values in both Python 2 and Python 3.
"""
def get_environ(self):
"""Return a new environ dict targeting the given wsgi.version"""
req = self.req
env_10 = WSGIGateway_10.get_environ(self)
env = env_10.copy()
env['wsgi.version'] = ('u', 0)
# Request-URI
env.setdefault('wsgi.url_encoding', 'utf-8')
try:
# SCRIPT_NAME is the empty string, who cares what encoding it is?
env["PATH_INFO"] = req.path.decode(env['wsgi.url_encoding'])
env["QUERY_STRING"] = req.qs.decode(env['wsgi.url_encoding'])
except UnicodeDecodeError:
# Fall back to latin 1 so apps can transcode if needed.
env['wsgi.url_encoding'] = 'ISO-8859-1'
env["PATH_INFO"] = env_10["PATH_INFO"]
env["QUERY_STRING"] = env_10["QUERY_STRING"]
return env
wsgi_gateways = {
(1, 0): WSGIGateway_10,
('u', 0): WSGIGateway_u0,
}
class WSGIPathInfoDispatcher(object):
"""A WSGI dispatcher for dispatch based on the PATH_INFO.
apps: a dict or list of (path_prefix, app) pairs.
"""
def __init__(self, apps):
try:
apps = list(apps.items())
except AttributeError:
pass
# Sort the apps by len(path), descending
apps.sort()
apps.reverse()
# The path_prefix strings must start, but not end, with a slash.
# Use "" instead of "/".
self.apps = [(p.rstrip("/"), a) for p, a in apps]
def __call__(self, environ, start_response):
path = environ["PATH_INFO"] or "/"
for p, app in self.apps:
# The apps list should be sorted by length, descending.
if path.startswith(p + "/") or path == p:
environ = environ.copy()
environ["SCRIPT_NAME"] = environ["SCRIPT_NAME"] + p
environ["PATH_INFO"] = path[len(p):]
return app(environ, start_response)
start_response('404 Not Found', [('Content-Type', 'text/plain'),
('Content-Length', '0')])
return ['']
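# Illustrative sketch (not part of the original module): mounting two WSGI apps on
# different path prefixes with WSGIPathInfoDispatcher. The app names and prefixes
# below are hypothetical.
def _example_path_dispatch():
    def hello_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'hello']
    def admin_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'admin']
    # Longer prefixes are matched first; anything else gets the 404 above.
    return WSGIPathInfoDispatcher([('/', hello_app), ('/admin', admin_app)])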
webpy/web/wsgiserver/LICENSE.txt 0000644 0001750 0001750 00000003006 13146625266 015152 0 ustar wmb wmb Copyright (c) 2004-2007, CherryPy Team (team@cherrypy.org)
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the CherryPy Team nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
webpy/web/test.py 0000644 0001750 0001750 00000004325 13146625266 012465 0 ustar wmb wmb """test utilities
(part of web.py)
"""
import unittest, doctest
import sys, re
import web
from .py3helpers import PY2
TestCase = unittest.TestCase
TestSuite = unittest.TestSuite
def load_modules(names):
return [__import__(name, None, None, "x") for name in names]
def module_suite(module, classnames=None):
"""Makes a suite from a module."""
if classnames:
return unittest.TestLoader().loadTestsFromNames(classnames, module)
elif hasattr(module, 'suite'):
return module.suite()
else:
return unittest.TestLoader().loadTestsFromModule(module)
#Little wrapper needed for automatically adapting doctests between Py2 and Py3
#Source : Dirkjan Ochtman (https://dirkjan.ochtman.nl/writing/2014/07/06/single-source-python-23-doctests.html)
class Py23DocChecker(doctest.OutputChecker):
def check_output(self, want, got, optionflags):
if not PY2:
#Differences between unicode strings representations : u"foo" -> "foo"
want = re.sub("u'(.*?)'", "'\\1'", want)
want = re.sub('u"(.*?)"', '"\\1"', want)
#NameError message has changed
want = want.replace('NameError: global name', 'NameError: name')
else:
want = re.sub("^b'(.*?)'", "'\\1'", want)
want = re.sub('^b"(.*?)"', '"\\1"', want)
return doctest.OutputChecker.check_output(self, want, got, optionflags)
def doctest_suite(module_names):
"""Makes a test suite from doctests."""
suite = TestSuite()
for mod in load_modules(module_names):
suite.addTest(doctest.DocTestSuite(mod, checker=Py23DocChecker()))
return suite
def suite(module_names):
"""Creates a suite from multiple modules."""
suite = TestSuite()
for mod in load_modules(module_names):
suite.addTest(module_suite(mod))
return suite
def runTests(suite):
runner = unittest.TextTestRunner()
return runner.run(suite)
def main(suite=None):
if not suite:
main_module = __import__('__main__')
# allow command line switches
args = [a for a in sys.argv[1:] if not a.startswith('-')]
suite = module_suite(main_module, args or None)
result = runTests(suite)
sys.exit(not result.wasSuccessful())
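# A minimal sketch (not part of the original file) of composing these helpers:
# build one suite from doctests plus unittest modules and run it. The module
# names are hypothetical placeholders.
def _example_test_run():
    doctests = doctest_suite(["web.net", "web.http"])   # doctest-based tests
    unittests = suite(["tests.test_application"])       # unittest-based tests (hypothetical module)
    combined = TestSuite([doctests, unittests])
    return runTests(combined)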
webpy/web/httpserver.py 0000644 0001750 0001750 00000027226 13146625266 013721 0 ustar wmb wmb from __future__ import print_function
import sys, os
import urllib
import posixpath
from . import webapi as web
from . import net
from . import utils
from .py3helpers import PY2
if PY2:
from SimpleHTTPServer import SimpleHTTPRequestHandler
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
else:
from http.server import HTTPServer, SimpleHTTPRequestHandler, BaseHTTPRequestHandler
try:
from urllib import parse as urlparse
from urllib.parse import unquote
except ImportError:
import urlparse
from urllib import unquote
try:
from io import BytesIO
except ImportError:
from StringIO import BytesIO
__all__ = ["runsimple"]
def runbasic(func, server_address=("0.0.0.0", 8080)):
"""
Runs a simple HTTP server hosting WSGI app `func`. The directory `static/`
is hosted statically.
Based on [WsgiServer][ws] from [Colin Stewart][cs].
[ws]: http://www.owlfish.com/software/wsgiutils/documentation/wsgi-server-api.html
[cs]: http://www.owlfish.com/
"""
# Copyright (c) 2004 Colin Stewart (http://www.owlfish.com/)
# Modified somewhat for simplicity
# Used under the modified BSD license:
# http://www.xfree86.org/3.3.6/COPYRIGHT2.html#5
try: import socketserver as SocketServer  # Python 3
except ImportError: import SocketServer  # Python 2
import socket, errno
import traceback
class WSGIHandler(SimpleHTTPRequestHandler):
def run_wsgi_app(self):
protocol, host, path, parameters, query, fragment = \
urlparse.urlparse('http://dummyhost%s' % self.path)
# we only use path, query
env = {'wsgi.version': (1, 0)
,'wsgi.url_scheme': 'http'
,'wsgi.input': self.rfile
,'wsgi.errors': sys.stderr
,'wsgi.multithread': 1
,'wsgi.multiprocess': 0
,'wsgi.run_once': 0
,'REQUEST_METHOD': self.command
,'REQUEST_URI': self.path
,'PATH_INFO': path
,'QUERY_STRING': query
,'CONTENT_TYPE': self.headers.get('Content-Type', '')
,'CONTENT_LENGTH': self.headers.get('Content-Length', '')
,'REMOTE_ADDR': self.client_address[0]
,'SERVER_NAME': self.server.server_address[0]
,'SERVER_PORT': str(self.server.server_address[1])
,'SERVER_PROTOCOL': self.request_version
}
for http_header, http_value in self.headers.items():
env ['HTTP_%s' % http_header.replace('-', '_').upper()] = \
http_value
# Setup the state
self.wsgi_sent_headers = 0
self.wsgi_headers = []
try:
# We have the environment; now invoke the application
result = self.server.app(env, self.wsgi_start_response)
try:
try:
for data in result:
if data:
self.wsgi_write_data(data)
finally:
if hasattr(result, 'close'):
result.close()
except socket.error as socket_err:
# Catch common network errors and suppress them
if (socket_err.args[0] in \
(errno.ECONNABORTED, errno.EPIPE)):
return
except socket.timeout as socket_timeout:
return
except:
print(traceback.format_exc(), file=web.debug)
if (not self.wsgi_sent_headers):
# We must write out something!
self.wsgi_write_data(" ")
return
do_POST = run_wsgi_app
do_PUT = run_wsgi_app
do_DELETE = run_wsgi_app
def do_GET(self):
if self.path.startswith('/static/'):
SimpleHTTPRequestHandler.do_GET(self)
else:
self.run_wsgi_app()
def wsgi_start_response(self, response_status, response_headers,
exc_info=None):
if (self.wsgi_sent_headers):
raise Exception \
("Headers already sent and start_response called again!")
# Should really take a copy to avoid changes in the application....
self.wsgi_headers = (response_status, response_headers)
return self.wsgi_write_data
def wsgi_write_data(self, data):
if (not self.wsgi_sent_headers):
status, headers = self.wsgi_headers
# Need to send header prior to data
status_code = status[:status.find(' ')]
status_msg = status[status.find(' ') + 1:]
self.send_response(int(status_code), status_msg)
for header, value in headers:
self.send_header(header, value)
self.end_headers()
self.wsgi_sent_headers = 1
# Send the data
self.wfile.write(data)
class WSGIServer(SocketServer.ThreadingMixIn, HTTPServer):
def __init__(self, func, server_address):
HTTPServer.__init__(self, server_address, WSGIHandler)
self.app = func
self.serverShuttingDown = 0
print("http://%s:%d/" % server_address)
WSGIServer(func, server_address).serve_forever()
# The WSGIServer instance.
# Made global so that it can be stopped in embedded mode.
server = None
def runsimple(func, server_address=("0.0.0.0", 8080)):
"""
Runs [CherryPy][cp] WSGI server hosting WSGI app `func`.
The directory `static/` is hosted statically.
[cp]: http://www.cherrypy.org
"""
global server
func = StaticMiddleware(func)
func = LogMiddleware(func)
server = WSGIServer(server_address, func)
if server.ssl_adapter:
print("https://%s:%d/" % server_address)
else:
print("http://%s:%d/" % server_address)
try:
server.start()
except (KeyboardInterrupt, SystemExit):
server.stop()
server = None
def WSGIServer(server_address, wsgi_app):
"""Creates CherryPy WSGI server listening at `server_address` to serve `wsgi_app`.
This function can be overwritten to customize the webserver or use a different webserver.
"""
from . import wsgiserver
# The default values in wsgiserver.ssl_adapters use the cherrypy.wsgiserver
# prefix. Overwriting them makes this work with web.wsgiserver.
wsgiserver.ssl_adapters = {
'builtin': 'web.wsgiserver.ssl_builtin.BuiltinSSLAdapter',
'pyopenssl': 'web.wsgiserver.ssl_pyopenssl.pyOpenSSLAdapter',
}
server = wsgiserver.CherryPyWSGIServer(server_address, wsgi_app, server_name="localhost")
def create_ssl_adapter(cert, key):
# wsgiserver tries to import submodules as cherrypy.wsgiserver.foo.
# That doesn't work here because the package is actually web.wsgiserver,
# so patch sys.modules temporarily to make it work.
import types
cherrypy = types.ModuleType('cherrypy')
cherrypy.wsgiserver = wsgiserver
sys.modules['cherrypy'] = cherrypy
sys.modules['cherrypy.wsgiserver'] = wsgiserver
from cherrypy.wsgiserver.ssl_pyopenssl import pyOpenSSLAdapter  # resolved via the sys.modules patch above
adapter = pyOpenSSLAdapter(cert, key)
# We are done with our work. Cleanup the patches.
del sys.modules['cherrypy']
del sys.modules['cherrypy.wsgiserver']
return adapter
# SSL backward compatibility
if (server.ssl_adapter is None and
getattr(server, 'ssl_certificate', None) and
getattr(server, 'ssl_private_key', None)):
server.ssl_adapter = create_ssl_adapter(server.ssl_certificate, server.ssl_private_key)
server.nodelay = not sys.platform.startswith('java') # TCP_NODELAY isn't supported on the JVM
return server
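# Usage sketch (assumption, not part of the original file): serving a tiny WSGI app
# with runsimple(). StaticMiddleware and LogMiddleware defined below are added by
# runsimple automatically; the address is only an example.
def _example_runsimple():
    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'Hello from runsimple']
    runsimple(app, ("0.0.0.0", 8080))   # blocks until interrupted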
class StaticApp(SimpleHTTPRequestHandler):
"""WSGI application for serving static files."""
def __init__(self, environ, start_response):
self.headers = []
self.environ = environ
self.start_response = start_response
def send_response(self, status, msg=""):
# the int(status) call is needed because in Py3 the status is an http.HTTPStatus (IntEnum) and we need the underlying integer
self.status = str(int(status)) + " " + msg
def send_header(self, name, value):
self.headers.append((name, value))
def end_headers(self):
pass
def log_message(*a): pass
def __iter__(self):
environ = self.environ
self.path = environ.get('PATH_INFO', '')
self.client_address = environ.get('REMOTE_ADDR','-'), \
environ.get('REMOTE_PORT','-')
self.command = environ.get('REQUEST_METHOD', '-')
self.wfile = BytesIO() # for capturing error
try:
path = self.translate_path(self.path)
etag = '"%s"' % os.path.getmtime(path)
client_etag = environ.get('HTTP_IF_NONE_MATCH')
self.send_header('ETag', etag)
if etag == client_etag:
self.send_response(304, "Not Modified")
self.start_response(self.status, self.headers)
return  # end the generator; raising StopIteration here breaks under PEP 479 (Python 3.7+)
except OSError:
pass # Probably a 404
f = self.send_head()
self.start_response(self.status, self.headers)
if f:
block_size = 16 * 1024
while True:
buf = f.read(block_size)
if not buf:
break
yield buf
f.close()
else:
value = self.wfile.getvalue()
yield value
class StaticMiddleware:
"""WSGI middleware for serving static files."""
def __init__(self, app, prefix='/static/'):
self.app = app
self.prefix = prefix
def __call__(self, environ, start_response):
path = environ.get('PATH_INFO', '')
path = self.normpath(path)
if path.startswith(self.prefix):
return StaticApp(environ, start_response)
else:
return self.app(environ, start_response)
def normpath(self, path):
path2 = posixpath.normpath(unquote(path))
if path.endswith("/"):
path2 += "/"
return path2
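# Sketch (assumption): the middleware classes compose as plain WSGI wrappers, so an
# application can also be wrapped manually instead of going through runsimple().
def _example_wrap_middleware(app):
    return LogMiddleware(StaticMiddleware(app, prefix='/static/'))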
class LogMiddleware:
"""WSGI middleware for logging the status."""
def __init__(self, app):
self.app = app
self.format = '%s - - [%s] "%s %s %s" - %s'
f = BytesIO()
class FakeSocket:
def makefile(self, *a):
return f
# take log_date_time_string method from BaseHTTPRequestHandler
self.log_date_time_string = BaseHTTPRequestHandler(FakeSocket(), None, None).log_date_time_string
def __call__(self, environ, start_response):
def xstart_response(status, response_headers, *args):
out = start_response(status, response_headers, *args)
self.log(status, environ)
return out
return self.app(environ, xstart_response)
def log(self, status, environ):
outfile = environ.get('wsgi.errors', web.debug)
req = environ.get('PATH_INFO', '_')
protocol = environ.get('ACTUAL_SERVER_PROTOCOL', '-')
method = environ.get('REQUEST_METHOD', '-')
host = "%s:%s" % (environ.get('REMOTE_ADDR','-'),
environ.get('REMOTE_PORT','-'))
time = self.log_date_time_string()
msg = self.format % (host, time, protocol, method, req, status)
print(utils.safestr(msg), file=outfile)
webpy/web/net.py 0000644 0001750 0001750 00000014630 13146625266 012274 0 ustar wmb wmb """
Network Utilities
(from web.py)
"""
__all__ = [
"validipaddr", "validip6addr", "validipport", "validip", "validaddr",
"urlquote",
"httpdate", "parsehttpdate",
"htmlquote", "htmlunquote", "websafe",
]
import urllib, time
import datetime
import re
import socket
try:
from urllib.parse import quote
except ImportError:
from urllib import quote
from .py3helpers import PY2
def validip6addr(address):
"""
Returns True if `address` is a valid IPv6 address.
>>> validip6addr('::')
True
>>> validip6addr('aaaa:bbbb:cccc:dddd::1')
True
>>> validip6addr('1:2:3:4:5:6:7:8:9:10')
False
>>> validip6addr('12:10')
False
"""
try:
socket.inet_pton(socket.AF_INET6, address)
except (socket.error, AttributeError):
return False
return True
def validipaddr(address):
"""
Returns True if `address` is a valid IPv4 address.
>>> validipaddr('192.168.1.1')
True
>>> validipaddr('192.168.1.800')
False
>>> validipaddr('192.168.1')
False
"""
try:
octets = address.split('.')
if len(octets) != 4:
return False
for x in octets:
if not (0 <= int(x) <= 255):
return False
except ValueError:
return False
return True
def validipport(port):
"""
Returns True if `port` is a valid IPv4 port.
>>> validipport('9000')
True
>>> validipport('foo')
False
>>> validipport('1000000')
False
"""
try:
if not (0 <= int(port) <= 65535):
return False
except ValueError:
return False
return True
def validip(ip, defaultaddr="0.0.0.0", defaultport=8080):
"""
Returns `(ip_address, port)` from string `ip_addr_port`
>>> validip('1.2.3.4')
('1.2.3.4', 8080)
>>> validip('80')
('0.0.0.0', 80)
>>> validip('192.168.0.1:85')
('192.168.0.1', 85)
>>> validip('::')
('::', 8080)
>>> validip('[::]:88')
('::', 88)
>>> validip('[::1]:80')
('::1', 80)
"""
addr = defaultaddr
port = defaultport
#Matt Boswell's code to check for ipv6 first
match = re.search(r'^\[([^]]+)\](?::(\d+))?$',ip) #check for [ipv6]:port
if match:
if validip6addr(match.group(1)):
if match.group(2):
if validipport(match.group(2)): return (match.group(1),int(match.group(2)))
else:
return (match.group(1),port)
else:
if validip6addr(ip): return (ip,port)
#end ipv6 code
ip = ip.split(":", 1)
if len(ip) == 1:
if not ip[0]:
pass
elif validipaddr(ip[0]):
addr = ip[0]
elif validipport(ip[0]):
port = int(ip[0])
else:
raise ValueError(':'.join(ip) + ' is not a valid IP address/port')
elif len(ip) == 2:
addr, port = ip
if not validipaddr(addr) or not validipport(port):
raise ValueError(':'.join(ip) + ' is not a valid IP address/port')
port = int(port)
else:
raise ValueError(':'.join(ip) + ' is not a valid IP address/port')
return (addr, port)
def validaddr(string_):
"""
Returns either (ip_address, port) or "/path/to/socket" from string_
>>> validaddr('/path/to/socket')
'/path/to/socket'
>>> validaddr('8000')
('0.0.0.0', 8000)
>>> validaddr('127.0.0.1')
('127.0.0.1', 8080)
>>> validaddr('127.0.0.1:8000')
('127.0.0.1', 8000)
>>> validip('[::1]:80')
('::1', 80)
>>> validaddr('fff')
Traceback (most recent call last):
...
ValueError: fff is not a valid IP address/port
"""
if '/' in string_:
return string_
else:
return validip(string_)
def urlquote(val):
"""
Quotes a string for use in a URL.
>>> urlquote('://?f=1&j=1')
'%3A//%3Ff%3D1%26j%3D1'
>>> urlquote(None)
''
>>> urlquote(u'\u203d')
'%E2%80%BD'
"""
if val is None: return ''
if PY2:
if isinstance(val, unicode):
val = val.encode('utf-8')
else:
val = str(val)
else:
val = str(val).encode('utf-8')
return quote(val)
def httpdate(date_obj):
"""
Formats a datetime object for use in HTTP headers.
>>> import datetime
>>> httpdate(datetime.datetime(1970, 1, 1, 1, 1, 1))
'Thu, 01 Jan 1970 01:01:01 GMT'
"""
return date_obj.strftime("%a, %d %b %Y %H:%M:%S GMT")
def parsehttpdate(string_):
"""
Parses an HTTP date into a datetime object.
>>> parsehttpdate('Thu, 01 Jan 1970 01:01:01 GMT')
datetime.datetime(1970, 1, 1, 1, 1, 1)
"""
try:
t = time.strptime(string_, "%a, %d %b %Y %H:%M:%S %Z")
except ValueError:
return None
return datetime.datetime(*t[:6])
def htmlquote(text):
r"""
Encodes `text` for raw use in HTML.
>>> htmlquote(u"<'&\">")
u'&lt;&#39;&amp;&quot;&gt;'
"""
text = text.replace(u"&", u"&amp;") # Must be done first!
text = text.replace(u"<", u"&lt;")
text = text.replace(u">", u"&gt;")
text = text.replace(u"'", u"&#39;")
text = text.replace(u'"', u"&quot;")
return text
def htmlunquote(text):
r"""
Decodes `text` that's HTML quoted.
>>> htmlunquote(u'&lt;&#39;&amp;&quot;&gt;')
u'<\'&">'
"""
text = text.replace(u"&quot;", u'"')
text = text.replace(u"&#39;", u"'")
text = text.replace(u"&gt;", u">")
text = text.replace(u"&lt;", u"<")
text = text.replace(u"&amp;", u"&") # Must be done last!
return text
def websafe(val):
r"""Converts `val` so that it is safe for use in Unicode HTML.
>>> websafe("<'&\">")
u'&lt;&#39;&amp;&quot;&gt;'
>>> websafe(None)
u''
>>> websafe(u'\u203d') == u'\u203d'
True
"""
if val is None:
return u''
if PY2:
if isinstance(val, str):
val = val.decode('utf-8')
elif not isinstance(val, unicode):
val = unicode(val)
else:
if isinstance(val, bytes):
val = val.decode('utf-8')
elif not isinstance(val, str):
val = str(val)
return htmlquote(val)
if __name__ == "__main__":
import doctest
doctest.testmod()
webpy/web/http.py 0000644 0001750 0001750 00000011013 13146625266 012455 0 ustar wmb wmb """
HTTP Utilities
(from web.py)
"""
__all__ = [
"expires", "lastmodified",
"prefixurl", "modified",
"changequery", "url",
"profiler",
]
import sys, os, threading, urllib
import datetime
from . import net, utils, webapi as web
from .py3helpers import iteritems
try:
from urllib.parse import urlencode as urllib_urlencode
except ImportError:
from urllib import urlencode as urllib_urlencode
def prefixurl(base=''):
"""
Sorry, this function is really difficult to explain.
Maybe some other time.
"""
url = web.ctx.path.lstrip('/')
for i in range(url.count('/')):
base += '../'
if not base:
base = './'
return base
def expires(delta):
"""
Outputs an `Expires` header for `delta` from now.
`delta` is a `timedelta` object or a number of seconds.
"""
if not isinstance(delta, datetime.timedelta):
    delta = datetime.timedelta(seconds=delta)
date_obj = datetime.datetime.utcnow() + delta
web.header('Expires', net.httpdate(date_obj))
def lastmodified(date_obj):
"""Outputs a `Last-Modified` header for `datetime`."""
web.header('Last-Modified', net.httpdate(date_obj))
def modified(date=None, etag=None):
"""
Checks to see if the page has been modified since the version in the
requester's cache.
When you publish pages, you can include `Last-Modified` and `ETag`
with the date the page was last modified and an opaque token for
the particular version, respectively. When readers reload the page,
the browser sends along the modification date and etag value for
the version it has in its cache. If the page hasn't changed,
the server can just return `304 Not Modified` and not have to
send the whole page again.
This function takes the last-modified date `date` and the ETag `etag`
and checks the headers to see if they match. If they do, it returns
`True`, or otherwise it raises NotModified error. It also sets
`Last-Modified` and `ETag` output headers.
"""
# `set` has been a builtin since Python 2.4, so no compatibility import is needed.
n = set([x.strip('" ') for x in web.ctx.env.get('HTTP_IF_NONE_MATCH', '').split(',')])
m = net.parsehttpdate(web.ctx.env.get('HTTP_IF_MODIFIED_SINCE', '').split(';')[0])
validate = False
if etag:
if '*' in n or etag in n:
validate = True
if date and m:
# we subtract a second because
# HTTP dates don't have sub-second precision
if date-datetime.timedelta(seconds=1) <= m:
validate = True
if date: lastmodified(date)
if etag: web.header('ETag', '"' + etag + '"')
if validate:
raise web.notmodified()
else:
return True
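# Hedged usage sketch (not part of the original module): how modified() is typically
# called from inside a web.py GET handler during a request. The arguments are
# hypothetical values computed by the application.
def _example_conditional_get(last_updated, version_token):
    # Raises web.notmodified() (HTTP 304) when the client's cache is current;
    # otherwise sets Last-Modified/ETag and returns True so the handler can render.
    if modified(date=last_updated, etag=version_token):
        return "full page body"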
def urlencode(query, doseq=0):
"""
Same as urllib.urlencode, but supports unicode strings.
>>> urlencode({'text':'foo bar'})
'text=foo+bar'
>>> urlencode({'x': [1, 2]}, doseq=True)
'x=1&x=2'
"""
def convert(value, doseq=False):
if doseq and isinstance(value, list):
return [convert(v) for v in value]
else:
return utils.safestr(value)
query = dict([(k, convert(v, doseq)) for k, v in query.items()])
return urllib_urlencode(query, doseq=doseq)
def changequery(query=None, **kw):
"""
Imagine you're at `/foo?a=1&b=2`. Then `changequery(a=3)` will return
`/foo?a=3&b=2` -- the same URL but with the arguments you requested
changed.
"""
if query is None:
query = web.rawinput(method='get')
for k, v in iteritems(kw):
if v is None:
query.pop(k, None)
else:
query[k] = v
out = web.ctx.path
if query:
out += '?' + urlencode(query, doseq=True)
return out
def url(path=None, doseq=False, **kw):
"""
Makes url by concatenating web.ctx.homepath and path and the
query string created using the arguments.
"""
if path is None:
path = web.ctx.path
if path.startswith("/"):
out = web.ctx.homepath + path
else:
out = path
if kw:
out += '?' + urlencode(kw, doseq=doseq)
return out
def profiler(app):
"""Outputs basic profiling information at the bottom of each response."""
from .utils import profile
def profile_internal(e, o):
out, result = profile(app)(e, o)
return list(out) + ['<pre>' + net.websafe(result) + '</pre>']
return profile_internal
if __name__ == "__main__":
import doctest
doctest.testmod()
webpy/web/debugerror.py 0000644 0001750 0001750 00000030407 13146625266 013646 0 ustar wmb wmb """
pretty debug errors
(part of web.py)
portions adapted from Django
Copyright (c) 2005, the Lawrence Journal-World
Used under the modified BSD license:
http://www.xfree86.org/3.3.6/COPYRIGHT2.html#5
"""
__all__ = ["debugerror", "djangoerror", "emailerrors"]
import sys, pprint, traceback
from .template import Template
from .net import websafe
from .utils import sendmail, safestr
from . import webapi as web
from .py3helpers import urljoin, PY2
if PY2:
def update_globals_template(t, globals):
t.t.func_globals.update(globals)
else:
def update_globals_template(t, globals):
t.t.__globals__.update(globals)
import os, os.path
whereami = os.path.join(os.getcwd(), __file__)
whereami = os.path.sep.join(whereami.split(os.path.sep)[:-1])
djangoerror_t = """\
$def with (exception_type, exception_value, frames)
$exception_type at $ctx.path
$def dicttable (d, kls='req', id=None):
$ items = d and list(d.items()) or []
$items.sort()
$:dicttable_items(items, kls, id)
$def dicttable_items(items, kls='req', id=None):
$if items:
Variable Value
$for k, v in items:
$k $prettify(v)
$else:
No data.
$exception_type at $ctx.path
$exception_value
Python
$frames[0].filename in $frames[0].function, line $frames[0].lineno
Web
$ctx.method $ctx.home$ctx.path
Traceback (innermost first)
$for frame in frames:
$frame.filename
in $frame.function
$if frame.context_line is not None:
$if frame.pre_context:
$for line in frame.pre_context:
$line
$frame.context_line ...
$if frame.post_context:
$for line in frame.post_context:
$line
$if frame.vars:
▶ Local vars
$# $inspect.formatargvalues(*inspect.getargvalues(frame['tb'].tb_frame))
$:dicttable(frame.vars, kls='vars', id=('v' + str(frame.id)))
$if ctx.output or ctx.headers:
Response so far
HEADERS
$:dicttable_items(ctx.headers)
BODY
$ctx.output
Request information
INPUT
$:dicttable(web.input(_unicode=False))
COOKIES
$:dicttable(web.cookies())
$ newctx = [(k, v) for (k, v) in ctx.iteritems() if not k.startswith('_') and not isinstance(v, dict)]
$:dicttable(dict(newctx))
$:dicttable(ctx.env)
You're seeing this error because you have web.config.debug
set to True
. Set that to False
if you don't want to see this.
"""
djangoerror_r = None
def djangoerror():
def _get_lines_from_file(filename, lineno, context_lines):
"""
Returns context_lines before and after lineno from file.
Returns (pre_context_lineno, pre_context, context_line, post_context).
"""
try:
source = open(filename).readlines()
lower_bound = max(0, lineno - context_lines)
upper_bound = lineno + context_lines
pre_context = \
[line.strip('\n') for line in source[lower_bound:lineno]]
context_line = source[lineno].strip('\n')
post_context = \
[line.strip('\n') for line in source[lineno + 1:upper_bound]]
return lower_bound, pre_context, context_line, post_context
except (OSError, IOError, IndexError):
return None, [], None, []
exception_type, exception_value, tback = sys.exc_info()
frames = []
while tback is not None:
filename = tback.tb_frame.f_code.co_filename
function = tback.tb_frame.f_code.co_name
lineno = tback.tb_lineno - 1
# hack to get correct line number for templates
lineno += tback.tb_frame.f_locals.get("__lineoffset__", 0)
pre_context_lineno, pre_context, context_line, post_context = \
_get_lines_from_file(filename, lineno, 7)
if '__hidetraceback__' not in tback.tb_frame.f_locals:
frames.append(web.storage({
'tback': tback,
'filename': filename,
'function': function,
'lineno': lineno,
'vars': tback.tb_frame.f_locals,
'id': id(tback),
'pre_context': pre_context,
'context_line': context_line,
'post_context': post_context,
'pre_context_lineno': pre_context_lineno,
}))
tback = tback.tb_next
frames.reverse()
def prettify(x):
try:
out = pprint.pformat(x)
except Exception as e:
out = '[could not display: <' + e.__class__.__name__ + \
': '+str(e)+'>]'
return out
global djangoerror_r
if djangoerror_r is None:
djangoerror_r = Template(djangoerror_t, filename=__file__, filter=websafe)
t = djangoerror_r
globals = {'ctx': web.ctx, 'web':web, 'dict':dict, 'str':str, 'prettify': prettify}
update_globals_template(t, globals)
return t(exception_type, exception_value, frames)
def debugerror():
"""
A replacement for `internalerror` that presents a nice page with lots
of debug information for the programmer.
(Based on the beautiful 500 page from [Django](http://djangoproject.com/),
designed by [Wilson Miner](http://wilsonminer.com/).)
"""
return web._InternalError(djangoerror())
def emailerrors(to_address, olderror, from_address=None):
"""
Wraps the old `internalerror` handler (pass as `olderror`) to
additionally email all errors to `to_address`, to aid in
debugging production websites.
Emails contain a normal text traceback as well as an
attachment containing the nice `debugerror` page.
"""
from_address = from_address or to_address
def emailerrors_internal():
error = olderror()
tb = sys.exc_info()
error_name = tb[0]
error_value = tb[1]
tb_txt = ''.join(traceback.format_exception(*tb))
path = web.ctx.path
request = web.ctx.method + ' ' + web.ctx.home + web.ctx.fullpath
message = "\n%s\n\n%s\n\n" % (request, tb_txt)
sendmail(
"your buggy site <%s>" % from_address,
"the bugfixer <%s>" % to_address,
"bug: %(error_name)s: %(error_value)s (%(path)s)" % locals(),
message,
attachments=[
dict(filename="bug.html", content=safestr(djangoerror()))
],
)
return error
return emailerrors_internal
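# Usage sketch (assumption): wiring emailerrors into an application so production
# errors are rendered by the wrapped handler and also mailed out. The addresses
# are placeholders.
def _example_emailerrors(app):
    app.internalerror = emailerrors('bugs@example.com', debugerror,
                                    from_address='server@example.com')
    return app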
if __name__ == "__main__":
urls = (
'/', 'index'
)
from .application import application
app = application(urls, globals())
app.internalerror = debugerror
class index:
def GET(self):
thisdoesnotexist
app.run()
webpy/web/session.py 0000644 0001750 0001750 00000025251 13146625266 013172 0 ustar wmb wmb """
Session Management
(from web.py)
"""
import os, time, datetime, random, base64
import os.path
from copy import deepcopy
try:
import cPickle as pickle
except ImportError:
import pickle
from hashlib import sha1
from . import utils
from . import webapi as web
from .py3helpers import PY2
__all__ = [
'Session', 'SessionExpired',
'Store', 'DiskStore', 'DBStore',
]
web.config.session_parameters = utils.storage({
'cookie_name': 'webpy_session_id',
'cookie_domain': None,
'cookie_path' : None,
'timeout': 86400, #24 * 60 * 60, # 24 hours in seconds
'ignore_expiry': True,
'ignore_change_ip': True,
'secret_key': 'fLjUfxqXtfNoIldA0A0J',
'expired_message': 'Session expired',
'httponly': True,
'secure': False
})
class SessionExpired(web.HTTPError):
def __init__(self, message):
web.HTTPError.__init__(self, '200 OK', {}, data=message)
class Session(object):
"""Session management for web.py
"""
__slots__ = [
"store", "_initializer", "_last_cleanup_time", "_config", "_data",
"__getitem__", "__setitem__", "__delitem__"
]
def __init__(self, app, store, initializer=None):
self.store = store
self._initializer = initializer
self._last_cleanup_time = 0
self._config = utils.storage(web.config.session_parameters)
self._data = utils.threadeddict()
self.__getitem__ = self._data.__getitem__
self.__setitem__ = self._data.__setitem__
self.__delitem__ = self._data.__delitem__
if app:
app.add_processor(self._processor)
def __contains__(self, name):
return name in self._data
def __getattr__(self, name):
return getattr(self._data, name)
def __setattr__(self, name, value):
if name in self.__slots__:
object.__setattr__(self, name, value)
else:
setattr(self._data, name, value)
def __delattr__(self, name):
delattr(self._data, name)
def _processor(self, handler):
"""Application processor to setup session for every request"""
self._cleanup()
self._load()
try:
return handler()
finally:
self._save()
def _load(self):
"""Load the session from the store, by the id from cookie"""
cookie_name = self._config.cookie_name
cookie_domain = self._config.cookie_domain
cookie_path = self._config.cookie_path
httponly = self._config.httponly
self.session_id = web.cookies().get(cookie_name)
# protection against session_id tampering
if self.session_id and not self._valid_session_id(self.session_id):
self.session_id = None
self._check_expiry()
if self.session_id:
d = self.store[self.session_id]
self.update(d)
self._validate_ip()
if not self.session_id:
self.session_id = self._generate_session_id()
if self._initializer:
if isinstance(self._initializer, dict):
self.update(deepcopy(self._initializer))
elif hasattr(self._initializer, '__call__'):
self._initializer()
self.ip = web.ctx.ip
def _check_expiry(self):
# check for expiry
if self.session_id and self.session_id not in self.store:
if self._config.ignore_expiry:
self.session_id = None
else:
return self.expired()
def _validate_ip(self):
# check for change of IP
if self.session_id and self.get('ip', None) != web.ctx.ip:
if not self._config.ignore_change_ip:
return self.expired()
def _save(self):
if not self.get('_killed'):
self._setcookie(self.session_id)
self.store[self.session_id] = dict(self._data)
else:
self._setcookie(self.session_id, expires=-1)
def _setcookie(self, session_id, expires='', **kw):
cookie_name = self._config.cookie_name
cookie_domain = self._config.cookie_domain
cookie_path = self._config.cookie_path
httponly = self._config.httponly
secure = self._config.secure
web.setcookie(cookie_name, session_id, expires=expires, domain=cookie_domain, httponly=httponly, secure=secure, path=cookie_path)
def _generate_session_id(self):
"""Generate a random id for session"""
while True:
rand = os.urandom(16)
now = time.time()
secret_key = self._config.secret_key
hashable = "%s%s%s%s" %(rand, now, utils.safestr(web.ctx.ip), secret_key)
session_id = sha1(hashable if PY2 else hashable.encode('utf-8')) #TODO maybe a better way to deal with this, without using an if-statement
session_id = session_id.hexdigest()
if session_id not in self.store:
break
return session_id
def _valid_session_id(self, session_id):
rx = utils.re_compile('^[0-9a-fA-F]+$')
return rx.match(session_id)
def _cleanup(self):
"""Cleanup the stored sessions"""
current_time = time.time()
timeout = self._config.timeout
if current_time - self._last_cleanup_time > timeout:
self.store.cleanup(timeout)
self._last_cleanup_time = current_time
def expired(self):
"""Called when an expired session is atime"""
self._killed = True
self._save()
raise SessionExpired(self._config.expired_message)
def kill(self):
"""Kill the session, make it no longer available"""
del self.store[self.session_id]
self._killed = True
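# Usage sketch (assumption, not part of the original module): attaching a Session to
# a web.py application with the DiskStore defined below. The directory name and
# initializer are hypothetical.
def _example_session_setup(app):
    store = DiskStore('sessions')
    session = Session(app, store, initializer={'count': 0})
    return session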
class Store:
"""Base class for session stores"""
def __contains__(self, key):
raise NotImplementedError()
def __getitem__(self, key):
raise NotImplementedError()
def __setitem__(self, key, value):
raise NotImplementedError()
def cleanup(self, timeout):
"""removes all the expired sessions"""
raise NotImplementedError()
def encode(self, session_dict):
"""encodes session dict as a string"""
pickled = pickle.dumps(session_dict)
return base64.b64encode(pickled)  # b64encode exists on both Py2 and Py3; encodestring is deprecated
def decode(self, session_data):
"""decodes the data to get back the session dict """
pickled = base64.b64decode(session_data)
return pickle.loads(pickled)
class DiskStore(Store):
"""
Store for saving a session on disk.
>>> import tempfile
>>> root = tempfile.mkdtemp()
>>> s = DiskStore(root)
>>> s['a'] = 'foo'
>>> s['a']
'foo'
>>> time.sleep(0.01)
>>> s.cleanup(0.01)
>>> s['a']
Traceback (most recent call last):
...
KeyError: 'a'
"""
def __init__(self, root):
# if the storage root doesn't exists, create it.
if not os.path.exists(root):
os.makedirs(
os.path.abspath(root)
)
self.root = root
def _get_path(self, key):
if os.path.sep in key:
raise ValueError("Bad key: %s" % repr(key))
return os.path.join(self.root, key)
def __contains__(self, key):
path = self._get_path(key)
return os.path.exists(path)
def __getitem__(self, key):
path = self._get_path(key)
if os.path.exists(path):
pickled = open(path, 'rb').read()
return self.decode(pickled)
else:
raise KeyError(key)
def __setitem__(self, key, value):
path = self._get_path(key)
pickled = self.encode(value)
try:
f = open(path, 'wb')
try:
f.write(pickled)
finally:
f.close()
except IOError:
pass
def __delitem__(self, key):
path = self._get_path(key)
if os.path.exists(path):
os.remove(path)
def cleanup(self, timeout):
now = time.time()
for f in os.listdir(self.root):
path = self._get_path(f)
atime = os.stat(path).st_atime
if now - atime > timeout :
os.remove(path)
class DBStore(Store):
"""Store for saving a session in database
Needs a table with the following columns:
session_id CHAR(128) UNIQUE NOT NULL,
atime DATETIME NOT NULL default current_timestamp,
data TEXT
"""
def __init__(self, db, table_name):
self.db = db
self.table = table_name
def __contains__(self, key):
data = self.db.select(self.table, where="session_id=$key", vars=locals())
return bool(list(data))
def __getitem__(self, key):
now = datetime.datetime.now()
try:
s = self.db.select(self.table, where="session_id=$key", vars=locals())[0]
self.db.update(self.table, where="session_id=$key", atime=now, vars=locals())
except IndexError:
raise KeyError(key)
else:
return self.decode(s.data)
def __setitem__(self, key, value):
pickled = self.encode(value)
now = datetime.datetime.now()
if key in self:
self.db.update(self.table, where="session_id=$key", data=pickled,atime=now, vars=locals())
else:
self.db.insert(self.table, False, session_id=key, atime=now, data=pickled )
def __delitem__(self, key):
self.db.delete(self.table, where="session_id=$key", vars=locals())
def cleanup(self, timeout):
timeout = datetime.timedelta(timeout/(24.0*60*60)) #timedelta takes numdays as arg
last_allowed_time = datetime.datetime.now() - timeout
self.db.delete(self.table, where="$last_allowed_time > atime", vars=locals())
class ShelfStore:
"""Store for saving session using `shelve` module.
import shelve
store = ShelfStore(shelve.open('session.shelf'))
XXX: is shelve thread-safe?
"""
def __init__(self, shelf):
self.shelf = shelf
def __contains__(self, key):
return key in self.shelf
def __getitem__(self, key):
atime, v = self.shelf[key]
self[key] = v # update atime
return v
def __setitem__(self, key, value):
self.shelf[key] = time.time(), value
def __delitem__(self, key):
try:
del self.shelf[key]
except KeyError:
pass
def cleanup(self, timeout):
now = time.time()
for k in list(self.shelf.keys()):  # snapshot the keys; deleting while iterating fails on Py3
atime, v = self.shelf[k]
if now - atime > timeout :
del self[k]
if __name__ == '__main__' :
import doctest
doctest.testmod()
webpy/web/template.py 0000644 0001750 0001750 00000142142 13146625266 013321 0 ustar wmb wmb """
simple, elegant templating
(part of web.py)
Template design:
Template string is split into tokens and the tokens are combined into nodes.
Parse tree is a nodelist. TextNode and ExpressionNode are simple nodes and
for-loop, if-loop etc are block nodes, which contain multiple child nodes.
Each node can emit some Python code. The code emitted by the
root node is validated with safeeval and executed in the given environment.
Care is taken to keep a line-to-line match between the generated code and the template,
so that error messages can point to the exact line number in the template. (This still doesn't work in some cases.)
Grammar:
template -> defwith sections
defwith -> '$def with (' arguments ')' | ''
sections -> section*
section -> block | assignment | line
assignment -> '$ ' <assignment expression>
line -> (text|expr)*
text -> <any characters other than $>
expr -> '$' pyexpr | '$(' pyexpr ')' | '${' pyexpr '}'
pyexpr -> <python expression>
"""
from __future__ import print_function
__all__ = [
"Template",
"Render", "render", "frender",
"ParseError", "SecurityError",
"test"
]
import tokenize
import os
import sys
import glob
import re
import warnings
import ast
from .utils import storage, safeunicode, safestr, re_compile
from .webapi import config
from .net import websafe
from .py3helpers import PY2, iteritems
if PY2:
from UserDict import DictMixin
# Make a new-style class
class MutableMapping(object, DictMixin):
pass
else:
from collections.abc import MutableMapping
def splitline(text):
r"""
Splits the given text at newline.
>>> splitline('foo\nbar')
('foo\n', 'bar')
>>> splitline('foo')
('foo', '')
>>> splitline('')
('', '')
"""
index = text.find('\n') + 1
if index:
return text[:index], text[index:]
else:
return text, ''
class Parser:
"""Parser Base.
"""
def __init__(self):
self.statement_nodes = STATEMENT_NODES
self.keywords = KEYWORDS
def parse(self, text, name=""):
self.text = text
self.name = name
defwith, text = self.read_defwith(text)
suite = self.read_suite(text)
return DefwithNode(defwith, suite)
def read_defwith(self, text):
if text.startswith('$def with'):
defwith, text = splitline(text)
defwith = defwith[1:].strip() # strip $ and spaces
return defwith, text
else:
return '', text
def read_section(self, text):
r"""Reads one section from the given text.
section -> block | assignment | line
>>> read_section = Parser().read_section
>>> read_section('foo\nbar\n')
(<line: [t'foo\n']>, 'bar\n')
>>> read_section('$ a = b + 1\nfoo\n')
(<assignment: 'a = b + 1'>, 'foo\n')
read_section('$for in range(10):\n hello $i\nfoo)
"""
if text.lstrip(' ').startswith('$'):
index = text.index('$')
begin_indent, text2 = text[:index], text[index+1:]
ahead = self.python_lookahead(text2)
if ahead == 'var':
return self.read_var(text2)
elif ahead in self.statement_nodes:
return self.read_block_section(text2, begin_indent)
elif ahead in self.keywords:
return self.read_keyword(text2)
elif ahead.strip() == '':
# assignments starts with a space after $
# ex: $ a = b + 2
return self.read_assignment(text2)
return self.readline(text)
def read_var(self, text):
r"""Reads a var statement.
>>> read_var = Parser().read_var
>>> read_var('var x=10\nfoo')
(<var: x = 10>, 'foo')
>>> read_var('var x: hello $name\nfoo')
(<var: x = join_(u'hello ', escape_(name, True))>, 'foo')
"""
line, text = splitline(text)
tokens = self.python_tokens(line)
if len(tokens) < 4:
raise SyntaxError('Invalid var statement')
name = tokens[1]
sep = tokens[2]
value = line.split(sep, 1)[1].strip()
if sep == '=':
pass # no need to process value
elif sep == ':':
#@@ Hack for backward-compatibility
if tokens[3] == '\n': # multi-line var statement
block, text = self.read_indented_block(text, '    ')
lines = [self.readline(x)[0] for x in block.splitlines()]
nodes = []
for x in lines:
nodes.extend(x.nodes)
nodes.append(TextNode('\n'))
else: # single-line var statement
linenode, _ = self.readline(value)
nodes = linenode.nodes
parts = [node.emit('') for node in nodes]
value = "join_(%s)" % ", ".join(parts)
else:
raise SyntaxError('Invalid var statement')
return VarNode(name, value), text
def read_suite(self, text):
r"""Reads section by section till end of text.
>>> read_suite = Parser().read_suite
>>> read_suite('hello $name\nfoo\n')
[<line: [t'hello ', $name, t'\n']>, <line: [t'foo\n']>]
"""
sections = []
while text:
section, text = self.read_section(text)
sections.append(section)
return SuiteNode(sections)
def readline(self, text):
r"""Reads one line from the text. Newline is supressed if the line ends with \.
>>> readline = Parser().readline
>>> readline('hello $name!\nbye!')
(, 'bye!')
>>> readline('hello $name!\\\nbye!')
(, 'bye!')
>>> readline('$f()\n\n')
(, '\n')
"""
line, text = splitline(text)
# suppress the newline if the line ends with \
if line.endswith('\\\n'):
line = line[:-2]
nodes = []
while line:
node, line = self.read_node(line)
nodes.append(node)
return LineNode(nodes), text
def read_node(self, text):
r"""Reads a node from the given text and returns the node and remaining text.
>>> read_node = Parser().read_node
>>> read_node('hello $name')
(t'hello ', '$name')
>>> read_node('$name')
($name, '')
"""
if text.startswith('$$'):
return TextNode('$'), text[2:]
elif text.startswith('$#'): # comment
line, text = splitline(text)
return TextNode('\n'), text
elif text.startswith('$'):
text = text[1:] # strip $
if text.startswith(':'):
escape = False
text = text[1:] # strip :
else:
escape = True
return self.read_expr(text, escape=escape)
else:
return self.read_text(text)
def read_text(self, text):
r"""Reads a text node from the given text.
>>> read_text = Parser().read_text
>>> read_text('hello $name')
(t'hello ', '$name')
"""
index = text.find('$')
if index < 0:
return TextNode(text), ''
else:
return TextNode(text[:index]), text[index:]
def read_keyword(self, text):
line, text = splitline(text)
return StatementNode(line.strip() + "\n"), text
def read_expr(self, text, escape=True):
"""Reads a python expression from the text and returns the expression and remaining text.
expr -> simple_expr | paren_expr
simple_expr -> id extended_expr
extended_expr -> attr_access | paren_expr extended_expr | ''
attr_access -> dot id extended_expr
paren_expr -> [ tokens ] | ( tokens ) | { tokens }
>>> read_expr = Parser().read_expr
>>> read_expr("name")
($name, '')
>>> read_expr("a.b and c")
($a.b, ' and c')
>>> read_expr("a. b")
($a, '. b')
>>> read_expr("name")
($name, '')
>>> read_expr("(limit)ing")
($(limit), 'ing')
>>> read_expr('a[1, 2][:3].f(1+2, "weird string[).", 3 + 4) done.')
($a[1, 2][:3].f(1+2, "weird string[).", 3 + 4), ' done.')
"""
def simple_expr():
identifier()
extended_expr()
def identifier():
next(tokens)
def extended_expr():
lookahead = tokens.lookahead()
if lookahead is None:
return
elif lookahead.value == '.':
attr_access()
elif lookahead.value in parens:
paren_expr()
extended_expr()
else:
return
def attr_access():
from token import NAME # python token constants
dot = tokens.lookahead()
if tokens.lookahead2().type == NAME:
next(tokens) # consume dot
identifier()
extended_expr()
def paren_expr():
begin = next(tokens).value
end = parens[begin]
while True:
if tokens.lookahead().value in parens:
paren_expr()
else:
t = next(tokens)
if t.value == end:
break
return
parens = {
"(": ")",
"[": "]",
"{": "}"
}
def get_tokens(text):
"""tokenize text using python tokenizer.
Python tokenizer ignores spaces, but they might be important in some cases.
This function introduces dummy space tokens when it identifies any ignored space.
Each token is a storage object containing type, value, begin and end.
"""
i = iter([text])
readline = lambda: next(i)
end = None
for t in tokenize.generate_tokens(readline):
t = storage(type=t[0], value=t[1], begin=t[2], end=t[3])
if end is not None and end != t.begin:
_, x1 = end
_, x2 = t.begin
yield storage(type=-1, value=text[x1:x2], begin=end, end=t.begin)
end = t.end
yield t
class BetterIter:
"""Iterator like object with 2 support for 2 look aheads."""
def __init__(self, items):
self.iteritems = iter(items)
self.items = []
self.position = 0
self.current_item = None
def lookahead(self):
if len(self.items) <= self.position:
self.items.append(self._next())
return self.items[self.position]
def _next(self):
try:
return next(self.iteritems)
except StopIteration:
return None
def lookahead2(self):
if len(self.items) <= self.position+1:
self.items.append(self._next())
return self.items[self.position+1]
def __next__(self):
self.current_item = self.lookahead()
self.position += 1
return self.current_item
next = __next__ #Needed for Py2 compatibility
tokens = BetterIter(get_tokens(text))
if tokens.lookahead().value in parens:
paren_expr()
else:
simple_expr()
row, col = tokens.current_item.end
return ExpressionNode(text[:col], escape=escape), text[col:]
def read_assignment(self, text):
r"""Reads assignment statement from text.
>>> read_assignment = Parser().read_assignment
>>> read_assignment('a = b + 1\nfoo')
(<assignment: 'a = b + 1'>, 'foo')
"""
line, text = splitline(text)
return AssignmentNode(line.strip()), text
def python_lookahead(self, text):
"""Returns the first python token from the given text.
>>> python_lookahead = Parser().python_lookahead
>>> python_lookahead('for i in range(10):')
'for'
>>> python_lookahead('else:')
'else'
>>> python_lookahead(' x = 1')
' '
"""
i = iter([text])
readline = lambda: next(i)
tokens = tokenize.generate_tokens(readline)
return next(tokens)[1]
def python_tokens(self, text):
i = iter([text])
readline = lambda: next(i)
tokens = tokenize.generate_tokens(readline)
return [t[1] for t in tokens]
def read_indented_block(self, text, indent):
r"""Read a block of text. A block is what typically follows a for or it statement.
It can be in the same line as that of the statement or an indented block.
>>> read_indented_block = Parser().read_indented_block
>>> read_indented_block('  a\n  b\nc', '  ')
('a\nb\n', 'c')
>>> read_indented_block('  a\n    b\n  c\nd', '  ')
('a\n  b\nc\n', 'd')
>>> read_indented_block('  a\n\n    b\nc', '  ')
('a\n\n  b\n', 'c')
"""
if indent == '':
return '', text
block = ""
while text:
line, text2 = splitline(text)
if line.strip() == "":
block += '\n'
elif line.startswith(indent):
block += line[len(indent):]
else:
break
text = text2
return block, text
def read_statement(self, text):
r"""Reads a python statement.
>>> read_statement = Parser().read_statement
>>> read_statement('for i in range(10): hello $name')
('for i in range(10):', ' hello $name')
"""
tok = PythonTokenizer(text)
tok.consume_till(':')
return text[:tok.index], text[tok.index:]
def read_block_section(self, text, begin_indent=''):
r"""
>>> read_block_section = Parser().read_block_section
>>> read_block_section('for i in range(10): hello $i\nfoo')
(<block: 'for i in range(10):', [<line: [t'hello ', $i, t'\n']>]>, 'foo')
>>> read_block_section('for i in range(10):\n        hello $i\n    foo', begin_indent='    ')
(<block: 'for i in range(10):', [<line: [t'hello ', $i, t'\n']>]>, '    foo')
>>> read_block_section('for i in range(10):\n  hello $i\nfoo')
(<block: 'for i in range(10):', [<line: [t'hello ', $i, t'\n']>]>, 'foo')
"""
line, text = splitline(text)
stmt, line = self.read_statement(line)
keyword = self.python_lookahead(stmt)
# if there is something left in the line
if line.strip():
block = line.lstrip()
else:
def find_indent(text):
rx = re_compile(' +')
match = rx.match(text)
first_indent = match and match.group(0)
return first_indent or ""
# find the indentation of the block by looking at the first line
first_indent = find_indent(text)[len(begin_indent):]
#TODO: fix this special case
if keyword == "code":
indent = begin_indent + first_indent
else:
indent = begin_indent + min(first_indent, INDENT)
block, text = self.read_indented_block(text, indent)
return self.create_block_node(keyword, stmt, block, begin_indent), text
def create_block_node(self, keyword, stmt, block, begin_indent):
if keyword in self.statement_nodes:
return self.statement_nodes[keyword](stmt, block, begin_indent)
else:
raise ParseError('Unknown statement: %s' % repr(keyword))
class PythonTokenizer:
"""Utility wrapper over python tokenizer."""
def __init__(self, text):
self.text = text
i = iter([text])
readline = lambda: next(i)
self.tokens = tokenize.generate_tokens(readline)
self.index = 0
def consume_till(self, delim):
"""Consumes tokens till colon.
>>> tok = PythonTokenizer('for i in range(10): hello $i')
>>> tok.consume_till(':')
>>> tok.text[:tok.index]
'for i in range(10):'
>>> tok.text[tok.index:]
' hello $i'
"""
try:
while True:
t = next(self)
if t.value == delim:
break
elif t.value == '(':
self.consume_till(')')
elif t.value == '[':
self.consume_till(']')
elif t.value == '{':
self.consume_till('}')
# if end of line is found, it is an exception.
# Since there is no easy way to report the line number,
# leave the error reporting to the python parser later
#@@ This should be fixed.
if t.value == '\n':
break
except:
#raise ParseError, "Expected %s, found end of line." % repr(delim)
# raising ParseError doesn't show the line number.
# if this error is ignored, then it will be caught when compiling the python code.
return
def __next__(self):
type, t, begin, end, line = next(self.tokens)
row, col = end
self.index = col
return storage(type=type, value=t, begin=begin, end=end)
next = __next__ #needed for Py2 compatibility
class DefwithNode:
def __init__(self, defwith, suite):
if defwith:
self.defwith = defwith.replace('with', '__template__') + ':'
# offset 4 lines. for encoding, __lineoffset__, loop and self.
self.defwith += "\n __lineoffset__ = -4"
else:
self.defwith = 'def __template__():'
# offset 4 lines for encoding, __template__, __lineoffset__, loop and self.
self.defwith += "\n __lineoffset__ = -5"
self.defwith += "\n loop = ForLoop()"
self.defwith += "\n self = TemplateResult(); extend_ = self.extend"
self.suite = suite
self.end = "\n return self"
def emit(self, indent):
encoding = "# coding: utf-8\n"
return encoding + self.defwith + self.suite.emit(indent + INDENT) + self.end
def __repr__(self):
return "" % (self.defwith, self.suite)
class TextNode:
def __init__(self, value):
self.value = value
def emit(self, indent, begin_indent=''):
return repr(safeunicode(self.value))
def __repr__(self):
return 't' + repr(self.value)
class ExpressionNode:
def __init__(self, value, escape=True):
self.value = value.strip()
# convert ${...} to $(...)
if value.startswith('{') and value.endswith('}'):
self.value = '(' + self.value[1:-1] + ')'
self.escape = escape
def emit(self, indent, begin_indent=''):
return 'escape_(%s, %s)' % (self.value, bool(self.escape))
def __repr__(self):
if self.escape:
escape = ''
else:
escape = ':'
return "$%s%s" % (escape, self.value)
class AssignmentNode:
def __init__(self, code):
self.code = code
def emit(self, indent, begin_indent=''):
return indent + self.code + "\n"
def __repr__(self):
return "" % repr(self.code)
class LineNode:
def __init__(self, nodes):
self.nodes = nodes
def emit(self, indent, text_indent='', name=''):
text = [node.emit('') for node in self.nodes]
if text_indent:
text = [repr(text_indent)] + text
return indent + "extend_([%s])\n" % ", ".join(text)
def __repr__(self):
return "" % repr(self.nodes)
INDENT = '    ' # 4 spaces
class BlockNode:
def __init__(self, stmt, block, begin_indent=''):
self.stmt = stmt
self.suite = Parser().read_suite(block)
self.begin_indent = begin_indent
def emit(self, indent, text_indent=''):
text_indent = self.begin_indent + text_indent
out = indent + self.stmt + self.suite.emit(indent + INDENT, text_indent)
return out
def __repr__(self):
return "" % (repr(self.stmt), repr(self.suite))
class ForNode(BlockNode):
def __init__(self, stmt, block, begin_indent=''):
self.original_stmt = stmt
tok = PythonTokenizer(stmt)
tok.consume_till('in')
a = stmt[:tok.index] # for i in
b = stmt[tok.index:-1] # rest of for stmt excluding :
stmt = a + ' loop.setup(' + b.strip() + '):'
BlockNode.__init__(self, stmt, block, begin_indent)
def __repr__(self):
return "" % (repr(self.original_stmt), repr(self.suite))
class CodeNode:
def __init__(self, stmt, block, begin_indent=''):
# compensate one line for $code:
self.code = "\n" + block
def emit(self, indent, text_indent=''):
import re
rx = re.compile('^', re.M)
return rx.sub(indent, self.code).rstrip(' ')
def __repr__(self):
return "" % repr(self.code)
class StatementNode:
def __init__(self, stmt):
self.stmt = stmt
def emit(self, indent, begin_indent=''):
return indent + self.stmt
def __repr__(self):
return "" % repr(self.stmt)
class IfNode(BlockNode):
pass
class ElseNode(BlockNode):
pass
class ElifNode(BlockNode):
pass
class DefNode(BlockNode):
def __init__(self, *a, **kw):
BlockNode.__init__(self, *a, **kw)
code = CodeNode("", "")
code.code = "self = TemplateResult(); extend_ = self.extend\n"
self.suite.sections.insert(0, code)
code = CodeNode("", "")
code.code = "return self\n"
self.suite.sections.append(code)
def emit(self, indent, text_indent=''):
text_indent = self.begin_indent + text_indent
out = indent + self.stmt + self.suite.emit(indent + INDENT, text_indent)
return indent + "__lineoffset__ -= 3\n" + out
class VarNode:
def __init__(self, name, value):
self.name = name
self.value = value
def emit(self, indent, text_indent):
return indent + "self[%s] = %s\n" % (repr(self.name), self.value)
def __repr__(self):
return "" % (self.name, self.value)
class SuiteNode:
"""Suite is a list of sections."""
def __init__(self, sections):
self.sections = sections
def emit(self, indent, text_indent=''):
return "\n" + "".join([s.emit(indent, text_indent) for s in self.sections])
def __repr__(self):
return repr(self.sections)
STATEMENT_NODES = {
'for': ForNode,
'while': BlockNode,
'if': IfNode,
'elif': ElifNode,
'else': ElseNode,
'def': DefNode,
'code': CodeNode
}
KEYWORDS = [
"pass",
"break",
"continue",
"return"
]
TEMPLATE_BUILTIN_NAMES = [
"dict", "enumerate", "float", "int", "bool", "list", "long", "reversed",
"set", "slice", "tuple", "xrange",
"abs", "all", "any", "callable", "chr", "cmp", "divmod", "filter", "hex",
"id", "isinstance", "iter", "len", "max", "min", "oct", "ord", "pow", "range",
"True", "False",
"None",
"__import__", # some c-libraries like datetime requires __import__ to present in the namespace
]
if PY2:
import __builtin__ as builtins
else:
import builtins
TEMPLATE_BUILTINS = dict([(name, getattr(builtins, name)) for name in TEMPLATE_BUILTIN_NAMES if name in builtins.__dict__])
class ForLoop:
"""
Wrapper for the expression in a for statement to support loop.xxx helpers.
>>> loop = ForLoop()
>>> for x in loop.setup(['a', 'b', 'c']):
... print(loop.index, loop.revindex, loop.parity, x)
...
1 3 odd a
2 2 even b
3 1 odd c
>>> loop.index
Traceback (most recent call last):
...
AttributeError: index
"""
def __init__(self):
self._ctx = None
def __getattr__(self, name):
if self._ctx is None:
raise AttributeError(name)
else:
return getattr(self._ctx, name)
def setup(self, seq):
self._push()
return self._ctx.setup(seq)
def _push(self):
self._ctx = ForLoopContext(self, self._ctx)
def _pop(self):
self._ctx = self._ctx.parent
class ForLoopContext:
"""Stackable context for ForLoop to support nested for loops.
"""
def __init__(self, forloop, parent):
self._forloop = forloop
self.parent = parent
def setup(self, seq):
try:
self.length = len(seq)
except:
self.length = 0
self.index = 0
for a in seq:
self.index += 1
yield a
self._forloop._pop()
index0 = property(lambda self: self.index-1)
first = property(lambda self: self.index == 1)
last = property(lambda self: self.index == self.length)
odd = property(lambda self: self.index % 2 == 1)
even = property(lambda self: self.index % 2 == 0)
parity = property(lambda self: ['odd', 'even'][self.even])
revindex0 = property(lambda self: self.length - self.index)
revindex = property(lambda self: self.length - self.index + 1)
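# Illustrative sketch, not part of the original source: nesting two loops over the
# same ForLoop instance, the way code generated by ForNode does. `loop.parent`
# resolves to the enclosing ForLoopContext, so the outer loop's helpers remain
# reachable from inside the inner loop.
def _example_nested_forloop():
    loop = ForLoop()
    for x in loop.setup("ab"):
        for y in loop.setup("xy"):
            # prints "1 1 a x", "1 2 a y", "2 1 b x", "2 2 b y"
            print("%d %d %s %s" % (loop.parent.index, loop.index, x, y))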
class BaseTemplate:
def __init__(self, code, filename, filter, globals, builtins):
self.filename = filename
self.filter = filter
self._globals = globals
self._builtins = builtins
if code:
self.t = self._compile(code)
else:
self.t = lambda: ''
def _compile(self, code):
env = self.make_env(self._globals or {}, self._builtins)
exec(code, env)
#__template__ is a global function declared when executing "code"
return env['__template__']
def __call__(self, *a, **kw):
__hidetraceback__ = True
return self.t(*a, **kw)
def make_env(self, globals, builtins):
return dict(globals,
__builtins__=builtins,
ForLoop=ForLoop,
TemplateResult=TemplateResult,
escape_=self._escape,
join_=self._join
)
def _join(self, *items):
return u"".join(items)
def _escape(self, value, escape=False):
if value is None:
value = ''
value = safeunicode(value)
if escape and self.filter:
value = self.filter(value)
return value
class Template(BaseTemplate):
CONTENT_TYPES = {
'.html' : 'text/html; charset=utf-8',
'.xhtml' : 'application/xhtml+xml; charset=utf-8',
'.txt' : 'text/plain',
}
FILTERS = {
'.html': websafe,
'.xhtml': websafe,
'.xml': websafe
}
globals = {}
def __init__(self, text, filename='', filter=None, globals=None, builtins=None, extensions=None):
self.extensions = extensions or []
text = Template.normalize_text(text)
code = self.compile_template(text, filename)
_, ext = os.path.splitext(filename)
filter = filter or self.FILTERS.get(ext, None)
self.content_type = self.CONTENT_TYPES.get(ext, None)
if globals is None:
globals = self.globals
if builtins is None:
builtins = TEMPLATE_BUILTINS
BaseTemplate.__init__(self, code=code, filename=filename, filter=filter, globals=globals, builtins=builtins)
def normalize_text(text):
"""Normalizes template text by correcting \r\n, tabs and BOM chars."""
text = text.replace('\r\n', '\n').replace('\r', '\n').expandtabs()
if not text.endswith('\n'):
text += '\n'
# ignore BOM chars at the beginning of the template
BOM = '\xef\xbb\xbf'
if isinstance(text, str) and text.startswith(BOM):
text = text[len(BOM):]
# support \$ for backward-compatibility
text = text.replace(r'\$', '$$')
return text
normalize_text = staticmethod(normalize_text)
def __call__(self, *a, **kw):
__hidetraceback__ = True
from . import webapi as web
if 'headers' in web.ctx and self.content_type:
web.header('Content-Type', self.content_type, unique=True)
return BaseTemplate.__call__(self, *a, **kw)
def generate_code(text, filename, parser=None):
# parse the text
parser = parser or Parser()
rootnode = parser.parse(text, filename)
# generate python code from the parse tree
code = rootnode.emit(indent="").strip()
return safestr(code)
generate_code = staticmethod(generate_code)
def create_parser(self):
p = Parser()
for ext in self.extensions:
p = ext(p)
return p
def compile_template(self, template_string, filename):
code = Template.generate_code(template_string, filename, parser=self.create_parser())
def get_source_line(filename, lineno):
try:
lines = open(filename).read().splitlines()
return lines[lineno]
except:
return None
try:
# compile the code first to report the errors, if any, with the filename
compiled_code = compile(code, filename, 'exec')
except SyntaxError as err:
# display template line that caused the error along with the traceback.
# this works in Py3 but not in Py2. TODO: fix for Py2.
err.msg += '\n\nTemplate traceback:\n File %s, line %s\n %s' % \
(repr(err.filename), err.lineno, get_source_line(err.filename, err.lineno-1))
raise
# make sure code is safe
ast_node = ast.parse(code, filename)
SafeVisitor().walk(ast_node, filename)
return compiled_code
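# Illustrative sketch, not part of the original source: using Template directly and
# peeking at the Python code it compiles. "<example>" is only a placeholder filename.
def _example_template_usage():
    hello = Template("$def with (name)\nHello $name!\n")
    result = hello("world")          # a TemplateResult; str() yields the body
    print(str(result))               # -> "Hello world!" followed by a newline
    # generate_code shows the intermediate source; it defines a __template__ function
    # that collects output parts into a TemplateResult (see DefNode.emit above).
    print(Template.generate_code("$def with (name)\nHello $name!\n", "<example>"))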
class CompiledTemplate(Template):
def __init__(self, f, filename):
Template.__init__(self, '', filename)
self.t = f
def compile_template(self, *a):
return None
def _compile(self, *a):
return None
class Render:
"""The most preferred way of using templates.
render = web.template.render('templates')
print render.foo()
Optional parameter can be `base` can be used to pass output of
every template through the base template.
render = web.template.render('templates', base='layout')
"""
def __init__(self, loc='templates', cache=None, base=None, **keywords):
self._loc = loc
self._keywords = keywords
if cache is None:
cache = not config.get('debug', False)
if cache:
self._cache = {}
else:
self._cache = None
if base and not hasattr(base, '__call__'):
# make base a function, so that it can be passed to sub-renders
self._base = lambda page: self._template(base)(page)
else:
self._base = base
def _add_global(self, obj, name=None):
"""Add a global to this rendering instance."""
if 'globals' not in self._keywords: self._keywords['globals'] = {}
if not name:
name = obj.__name__
self._keywords['globals'][name] = obj
def _lookup(self, name):
path = os.path.join(self._loc, name)
if os.path.isdir(path):
return 'dir', path
else:
path = self._findfile(path)
if path:
return 'file', path
else:
return 'none', None
def _load_template(self, name):
kind, path = self._lookup(name)
if kind == 'dir':
return Render(path, cache=self._cache is not None, base=self._base, **self._keywords)
elif kind == 'file':
return Template(open(path).read(), filename=path, **self._keywords)
else:
raise AttributeError("No template named " + name)
def _findfile(self, path_prefix):
p = [f for f in glob.glob(path_prefix + '.*') if not f.endswith('~')] # skip backup files
p.sort() # sort the matches for deterministic order
return p and p[0]
def _template(self, name):
if self._cache is not None:
if name not in self._cache:
self._cache[name] = self._load_template(name)
return self._cache[name]
else:
return self._load_template(name)
def __getattr__(self, name):
t = self._template(name)
if self._base and isinstance(t, Template):
def template(*a, **kw):
return self._base(t(*a, **kw))
return template
else:
return self._template(name)
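# Illustrative sketch, not part of the original source: typical Render usage. The
# "templates" directory and the "layout"/"index" template names are assumptions made
# only for this example; _add_global is the hook defined above for exposing extra
# names to every template rendered by this instance.
def _example_render_usage():
    render = Render("templates", base="layout")  # wrap every page in templates/layout.*
    render._add_global(len)                      # make len() callable inside templates
    return str(render.index())                   # renders templates/index.* through the base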
class GAE_Render(Render):
# Render gets overwritten; make a copy here.
super = Render
def __init__(self, loc, *a, **kw):
GAE_Render.super.__init__(self, loc, *a, **kw)
import types
if isinstance(loc, types.ModuleType):
self.mod = loc
else:
name = loc.rstrip('/').replace('/', '.')
self.mod = __import__(name, None, None, ['x'])
self.mod.__dict__.update(kw.get('builtins', TEMPLATE_BUILTINS))
self.mod.__dict__.update(Template.globals)
self.mod.__dict__.update(kw.get('globals', {}))
def _load_template(self, name):
t = getattr(self.mod, name)
import types
if isinstance(t, types.ModuleType):
return GAE_Render(t, cache=self._cache is not None, base=self._base, **self._keywords)
else:
return t
render = Render
# setup render for Google App Engine.
try:
from google import appengine
render = Render = GAE_Render
except ImportError:
pass
def frender(path, **keywords):
"""Creates a template from the given file path.
"""
return Template(open(path).read(), filename=path, **keywords)
def compile_templates(root):
"""Compiles templates to python code."""
re_start = re_compile('^', re.M)
for dirpath, dirnames, filenames in os.walk(root):
filenames = [f for f in filenames if not f.startswith('.') and not f.endswith('~') and not f.startswith('__init__.py')]
for d in dirnames[:]:
if d.startswith('.'):
dirnames.remove(d) # don't visit this dir
out = open(os.path.join(dirpath, '__init__.py'), 'w')
out.write('from web.template import CompiledTemplate, ForLoop, TemplateResult\n\n')
if dirnames:
out.write("import " + ", ".join(dirnames))
out.write("\n")
for f in filenames:
path = os.path.join(dirpath, f)
if '.' in f:
name, _ = f.split('.', 1)
else:
name = f
text = open(path).read()
text = Template.normalize_text(text)
code = Template.generate_code(text, path)
code = code.replace("__template__", name, 1)
out.write(code)
out.write('\n\n')
out.write('%s = CompiledTemplate(%s, %s)\n' % (name, name, repr(path)))
out.write("join_ = %s._join; escape_ = %s._escape\n\n" % (name, name))
# create template to make sure it compiles
t = Template(open(path).read(), path)
out.close()
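# Illustrative sketch, not part of the original source: pre-compiling a template
# directory and importing the generated package. The "templates" path and the "index"
# template name are assumptions for illustration; compile_templates writes an
# __init__.py next to the templates, turning the directory into an importable package.
def _example_compile_templates():
    compile_templates("templates")
    import templates                 # hypothetical package produced by the call above
    return str(templates.index())    # CompiledTemplate objects are called like normal templates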
class ParseError(Exception):
pass
class SecurityError(Exception):
"""The template seems to be trying to do something naughty."""
pass
ALLOWED_AST_NODES = ['Interactive', 'Expression', 'Suite', 'FunctionDef',
'ClassDef', 'Return', 'Delete', 'Assign', 'AugAssign', 'alias',
#'Print', 'Repr',
'For', 'While', 'If', 'With', 'comprehension','NameConstant', 'arg',
#'Raise', 'TryExcept', 'TryFinally', 'Assert', 'Import',
#'ImportFrom', 'Exec', 'Global',
'Expr', 'Pass', 'Break', 'Continue', 'BoolOp', 'BinOp', 'UnaryOp',
'Lambda', 'IfExp', 'Dict', 'Module', 'arguments', 'keyword',
'Set', 'ListComp', 'SetComp', 'DictComp', 'GeneratorExp', 'Yield',
'Compare', 'Call', 'Num', 'Str', 'Attribute', 'Subscript',
'Name', 'List', 'Tuple', 'Load', 'Store', 'Del', 'AugLoad', 'AugStore',
'Param', 'Ellipsis', 'Slice', 'ExtSlice', 'Index', 'And', 'Or', 'Add',
'Sub', 'Mult', 'Div', 'Mod', 'Pow', 'LShift', 'RShift', 'BitOr', 'BitXor',
'BitAnd', 'FloorDiv', 'Invert', 'Not', 'UAdd', 'USub', 'Eq', 'NotEq',
'Lt', 'LtE', 'Gt', 'GtE', 'Is', 'IsNot', 'In', 'NotIn', 'ExceptHandler']
class SafeVisitor(ast.NodeVisitor):
"""
Make sure code is safe by walking through the AST.
Code is considered unsafe if:
* it has restricted AST nodes (only nodes defined in ALLOWED_AST_NODES are allowed)
* it is trying to assign to attributes
* it is trying to access restricted attributes
Adapted from http://www.zafar.se/bkz/uploads/safe.txt (public domain, Babar K. Zafar)
* Using ast rather than compiler tree, for jython and Py3 support since Py2.6
* Simplified with ast.NodeVisitor class
"""
def __init__(self, *args, **kwargs):
"Initialize visitor by generating callbacks for all AST node types."
super(SafeVisitor, self).__init__(*args, **kwargs)
self.errors = []
def walk(self, tree, filename):
"Validate each node in AST and raise SecurityError if the code is not safe."
self.filename = filename
self.visit(tree)
if self.errors:
raise SecurityError('\n'.join([str(err) for err in self.errors]))
def generic_visit(self, node):
nodename = type(node).__name__
if nodename not in ALLOWED_AST_NODES:
self.fail_name(node, nodename)
super(SafeVisitor, self).generic_visit(node)
def visit_Attribute(self, node):
attrname = self.get_node_attr(node)
if self.is_unallowed_attr(attrname):
self.fail_attribute(node, attrname)
super(SafeVisitor, self).generic_visit(node)
def visit_Assign(self, node):
self.check_assign_targets(node)
def visit_AugAssign(self, node):
self.check_assign_target(node)
def check_assign_targets(self, node):
for target in node.targets:
self.check_assign_target(target)
super(SafeVisitor, self).generic_visit(node)
def check_assign_target(self, targetnode):
targetname = type(targetnode).__name__
if targetname == "Attribute":
attrname = self.get_node_attr(targetnode)
self.fail_attribute(targetnode, attrname)
# failure modes
def fail_name(self, node, nodename):
lineno = self.get_node_lineno(node)
e = SecurityError("%s:%d - execution of '%s' statements is denied" % (self.filename, lineno, nodename))
self.errors.append(e)
def fail_attribute(self, node, attrname):
lineno = self.get_node_lineno(node)
e = SecurityError("%s:%d - access to attribute '%s' is denied" % (self.filename, lineno, attrname))
self.errors.append(e)
# helpers
def is_unallowed_attr(self, name):
return name.startswith('_') \
or name.startswith('func_') \
or name.startswith('im_')
def get_node_attr(self, node):
return 'attr' in node._fields and node.attr or None
def get_node_lineno(self, node):
return (node.lineno) and node.lineno or 0
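# Illustrative sketch, not part of the original source: SafeVisitor runs at compile
# time (see Template.compile_template above), so a template that touches underscore
# attributes is rejected before it can execute.
def _example_security_check():
    try:
        Template("$def with (x)\n$x.__class__\n")
    except SecurityError as e:
        print(str(e))    # reports that access to attribute '__class__' is denied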
class TemplateResult(MutableMapping):
"""Dictionary like object for storing template output.
The result of a template execution is usually a string, but sometimes it
contains attributes set using $var. This class provides a simple
dictionary like interface for storing the output of the template and the
attributes. The output is stored with a special key __body__. Converting
the TemplateResult to string or unicode returns the value of __body__.
When the template is in execution, the output is generated part by part
and those parts are combined at the end. Parts are added to the
TemplateResult by calling the `extend` method and the parts are combined
seamlessly when __body__ is accessed.
>>> d = TemplateResult(__body__='hello, world', x='foo')
>>> print(d)
hello, world
>>> d.x
'foo'
>>> d = TemplateResult()
>>> d.extend([u'hello', u'world'])
>>> d
<TemplateResult: {'__body__': u'helloworld'}>
"""
def __init__(self, *a, **kw):
self.__dict__["_d"] = dict(*a, **kw)
self._d.setdefault("__body__", u'')
self.__dict__['_parts'] = []
self.__dict__["extend"] = self._parts.extend
self._d.setdefault("__body__", None)
def keys(self):
return self._d.keys()
def _prepare_body(self):
"""Prepare value of __body__ by joining parts.
"""
if self._parts:
value = u"".join(self._parts)
self._parts[:] = []
body = self._d.get('__body__')
if body:
self._d['__body__'] = body + value
else:
self._d['__body__'] = value
def __getitem__(self, name):
if name == "__body__":
self._prepare_body()
return self._d[name]
def __setitem__(self, name, value):
if name == "__body__":
self._prepare_body()
return self._d.__setitem__(name, value)
def __delitem__(self, name):
if name == "__body__":
self._prepare_body()
return self._d.__delitem__(name)
def __getattr__(self, key):
try:
return self[key]
except KeyError as k:
raise AttributeError(k)
def __setattr__(self, key, value):
self[key] = value
def __delattr__(self, key):
try:
del self[key]
except KeyError as k:
raise AttributeError(k)
def __unicode__(self):
self._prepare_body()
return self["__body__"]
def __str__(self):
self._prepare_body()
if PY2:
return self["__body__"].encode('utf-8')
else:
return self["__body__"]
def __repr__(self):
self._prepare_body()
return "" % self._d
def __len__(self):
return self._d.__len__()
def __iter__(self):
for i in self._d.__iter__():
if i == "__body__":
self._prepare_body()
yield i
def test():
r"""Doctest for testing template module.
Define a utility function to run template test.
>>> class TestResult:
... def __init__(self, t): self.t = t
... def __getattr__(self, name): return getattr(self.t, name)
... def __repr__(self): return repr(unicode(self.t) if PY2 else str(self.t))
...
>>> def t(code, **keywords):
... tmpl = Template(code, **keywords)
... return lambda *a, **kw: TestResult(tmpl(*a, **kw))
...
Simple tests.
>>> t('1')()
u'1\n'
>>> t('$def with ()\n1')()
u'1\n'
>>> t('$def with (a)\n$a')(1)
u'1\n'
>>> t('$def with (a=0)\n$a')(1)
u'1\n'
>>> t('$def with (a=0)\n$a')(a=1)
u'1\n'
Test complicated expressions.
>>> t('$def with (x)\n$x.upper()')('hello')
u'HELLO\n'
>>> t('$(2 * 3 + 4 * 5)')()
u'26\n'
>>> t('${2 * 3 + 4 * 5}')()
u'26\n'
>>> t('$def with (limit)\nkeep $(limit)ing.')('go')
u'keep going.\n'
>>> t('$def with (a)\n$a.b[0]')(storage(b=[1]))
u'1\n'
Test html escaping.
>>> t('$def with (x)\n$x', filename='a.html')('<html>')
u'&lt;html&gt;\n'
>>> t('$def with (x)\n$x', filename='a.txt')('<html>')
u'<html>\n'
Test if, for and while.
>>> t('$if 1: 1')()
u'1\n'
>>> t('$if 1:\n 1')()
u'1\n'
>>> t('$if 1:\n 1\\')()
u'1'
>>> t('$if 0: 0\n$elif 1: 1')()
u'1\n'
>>> t('$if 0: 0\n$elif None: 0\n$else: 1')()
u'1\n'
>>> t('$if 0 < 1 and 1 < 2: 1')()
u'1\n'
>>> t('$for x in [1, 2, 3]: $x')()
u'1\n2\n3\n'
>>> t('$def with (d)\n$for k, v in d.items(): $k')({1: 1})
u'1\n'
>>> t('$for x in [1, 2, 3]:\n\t$x')()
u' 1\n 2\n 3\n'
>>> t('$def with (a)\n$while a and a.pop():1')([1, 2, 3])
u'1\n1\n1\n'
The space after : must be ignored.
>>> t('$if True: foo')()
u'foo\n'
Test loop.xxx.
>>> t("$for i in range(5):$loop.index, $loop.parity")()
u'1, odd\n2, even\n3, odd\n4, even\n5, odd\n'
>>> t("$for i in range(2):\n $for j in range(2):$loop.parent.parity $loop.parity")()
u'odd odd\nodd even\neven odd\neven even\n'
Test assignment.
>>> t('$ a = 1\n$a')()
u'1\n'
>>> t('$ a = [1]\n$a[0]')()
u'1\n'
>>> t('$ a = {1: 1}\n$list(a.keys())[0]')()
u'1\n'
>>> t('$ a = []\n$if not a: 1')()
u'1\n'
>>> t('$ a = {}\n$if not a: 1')()
u'1\n'
>>> t('$ a = -1\n$a')()
u'-1\n'
>>> t('$ a = "1"\n$a')()
u'1\n'
Test comments.
>>> t('$# 0')()
u'\n'
>>> t('hello$#comment1\nhello$#comment2')()
u'hello\nhello\n'
>>> t('$#comment0\nhello$#comment1\nhello$#comment2')()
u'\nhello\nhello\n'
Test unicode.
>>> t('$def with (a)\n$a')(u'\u203d')
u'\u203d\n'
>>> t(u'$def with (a)\n$a $:a')(u'\u203d')
u'\u203d \u203d\n'
>>> t(u'$def with ()\nfoo')()
u'foo\n'
>>> def f(x): return x
...
>>> t(u'$def with (f)\n$:f("x")')(f)
u'x\n'
>>> t('$def with (f)\n$:f("x")')(f)
u'x\n'
Test dollar escaping.
>>> t("Stop, $$money isn't evaluated.")()
u"Stop, $money isn't evaluated.\n"
>>> t("Stop, \$money isn't evaluated.")()
u"Stop, $money isn't evaluated.\n"
Test space sensitivity.
>>> t('$def with (x)\n$x')(1)
u'1\n'
>>> t('$def with(x ,y)\n$x')(1, 1)
u'1\n'
>>> t('$(1 + 2*3 + 4)')()
u'11\n'
Make sure globals are working.
>>> t('$x')()
Traceback (most recent call last):
...
NameError: global name 'x' is not defined
>>> t('$x', globals={'x': 1})()
u'1\n'
Can't change globals.
>>> t('$ x = 2\n$x', globals={'x': 1})()
u'2\n'
>>> t('$ x = x + 1\n$x', globals={'x': 1})()
Traceback (most recent call last):
...
UnboundLocalError: local variable 'x' referenced before assignment
Make sure builtins are customizable.
>>> t('$min(1, 2)')()
u'1\n'
>>> t('$min(1, 2)', builtins={})()
Traceback (most recent call last):
...
NameError: global name 'min' is not defined
Test vars.
>>> x = t('$var x: 1')()
>>> x.x
u'1'
>>> x = t('$var x = 1')()
>>> x.x
1
>>> x = t('$var x: \n foo\n bar')()
>>> x.x
u'foo\nbar\n'
Test BOM chars.
>>> t('\xef\xbb\xbf$def with(x)\n$x')('foo')
u'foo\n'
Test for loops with weird cases.
>>> t('$for i in range(10)[1:5]:\n $i')()
u'1\n2\n3\n4\n'
>>> t("$for k, v in sorted({'a': 1, 'b': 2}.items()):\n $k $v", globals={'sorted':sorted})()
u'a 1\nb 2\n'
Test for syntax error.
>>> try:
... t("$for k, v in ({'a': 1, 'b': 2}.items():\n $k $v")()
... except SyntaxError:
... print("OK")
... else:
... print("Expected SyntaxError")
...
OK
Test datetime.
>>> import datetime
>>> t("$def with (date)\n$date.strftime('%m %Y')")(datetime.datetime(2009, 1, 1))
u'01 2009\n'
"""
pass
if __name__ == "__main__":
import sys
if '--compile' in sys.argv:
compile_templates(sys.argv[2])
else:
import doctest
doctest.testmod()
webpy/web/application.py 0000644 0001750 0001750 00000062414 13146625266 014014 0 ustar wmb wmb """
Web application
(from web.py)
"""
from __future__ import print_function
from . import webapi as web
from . import webapi, wsgi, utils, browser
from .debugerror import debugerror
from . import httpserver
from .utils import lstrips, safeunicode
from .py3helpers import iteritems, string_types, is_iter, PY2, text_type
import sys
import urllib
import traceback
import itertools
import os
import types
from inspect import isclass
import wsgiref.handlers
try:
from urllib.parse import splitquery, urlencode, quote, unquote
except ImportError:
from urllib import splitquery, urlencode, quote, unquote
try:
from importlib import reload #Since Py 3.4 reload is in importlib
except ImportError:
try:
from imp import reload #Since Py 3.0 and before 3.4 reload is in imp
except ImportError:
pass #Before Py 3.0 reload is a global function
from io import BytesIO
__all__ = [
"application", "auto_application",
"subdir_application", "subdomain_application",
"loadhook", "unloadhook",
"autodelegate"
]
class application:
"""
Application to delegate requests based on path.
>>> urls = ("/hello", "hello")
>>> app = application(urls, globals())
>>> class hello:
... def GET(self): return "hello"
>>>
>>> app.request("/hello").data
b'hello'
"""
def __init__(self, mapping=(), fvars={}, autoreload=None):
if autoreload is None:
autoreload = web.config.get('debug', False)
self.init_mapping(mapping)
self.fvars = fvars
self.processors = []
self.add_processor(loadhook(self._load))
self.add_processor(unloadhook(self._unload))
if autoreload:
def main_module_name():
mod = sys.modules['__main__']
file = getattr(mod, '__file__', None) # make sure this works even from python interpreter
return file and os.path.splitext(os.path.basename(file))[0]
def modname(fvars):
"""find name of the module name from fvars."""
file, name = fvars.get('__file__'), fvars.get('__name__')
if file is None or name is None:
return None
if name == '__main__':
# Since the __main__ module can't be reloaded, the module has
# to be imported using its file name.
name = main_module_name()
return name
mapping_name = utils.dictfind(fvars, mapping)
module_name = modname(fvars)
def reload_mapping():
"""loadhook to reload mapping and fvars."""
mod = __import__(module_name, None, None, [''])
mapping = getattr(mod, mapping_name, None)
if mapping:
self.fvars = mod.__dict__
self.init_mapping(mapping)
self.add_processor(loadhook(Reloader()))
if mapping_name and module_name:
self.add_processor(loadhook(reload_mapping))
# load __main__ module using its filename, so that it can be reloaded.
if main_module_name() and '__main__' in sys.argv:
try:
__import__(main_module_name())
except ImportError:
pass
def _load(self):
web.ctx.app_stack.append(self)
def _unload(self):
web.ctx.app_stack = web.ctx.app_stack[:-1]
if web.ctx.app_stack:
# this is a sub-application, revert ctx to earlier state.
oldctx = web.ctx.get('_oldctx')
if oldctx:
web.ctx.home = oldctx.home
web.ctx.homepath = oldctx.homepath
web.ctx.path = oldctx.path
web.ctx.fullpath = oldctx.fullpath
def _cleanup(self):
# Threads can be recycled by WSGI servers.
# Clear all thread-local state to avoid interfering with subsequent requests.
utils.ThreadedDict.clear_all()
def init_mapping(self, mapping):
self.mapping = list(utils.group(mapping, 2))
def add_mapping(self, pattern, classname):
self.mapping.append((pattern, classname))
def add_processor(self, processor):
"""
Adds a processor to the application.
>>> urls = ("/(.*)", "echo")
>>> app = application(urls, globals())
>>> class echo:
... def GET(self, name): return name
...
>>>
>>> def hello(handler): return "hello, " + handler()
...
>>> app.add_processor(hello)
>>> app.request("/web.py").data
b'hello, web.py'
"""
self.processors.append(processor)
def request(self, localpart='/', method='GET', data=None,
host="0.0.0.0:8080", headers=None, https=False, **kw):
"""Makes request to this application for the specified path and method.
Response will be a storage object with data, status and headers.
>>> urls = ("/hello", "hello")
>>> app = application(urls, globals())
>>> class hello:
... def GET(self):
... web.header('Content-Type', 'text/plain')
... return "hello"
...
>>> response = app.request("/hello")
>>> response.data
b'hello'
>>> response.status
'200 OK'
>>> response.headers['Content-Type']
'text/plain'
To use https, use https=True.
>>> urls = ("/redirect", "redirect")
>>> app = application(urls, globals())
>>> class redirect:
... def GET(self): raise web.seeother("/foo")
...
>>> response = app.request("/redirect")
>>> response.headers['Location']
'http://0.0.0.0:8080/foo'
>>> response = app.request("/redirect", https=True)
>>> response.headers['Location']
'https://0.0.0.0:8080/foo'
The headers argument specifies HTTP headers as a mapping object
such as a dict.
>>> urls = ('/ua', 'uaprinter')
>>> class uaprinter:
... def GET(self):
... return 'your user-agent is ' + web.ctx.env['HTTP_USER_AGENT']
...
>>> app = application(urls, globals())
>>> app.request('/ua', headers = {
... 'User-Agent': 'a small jumping bean/1.0 (compatible)'
... }).data
b'your user-agent is a small jumping bean/1.0 (compatible)'
"""
path, maybe_query = splitquery(localpart)
query = maybe_query or ""
if 'env' in kw:
env = kw['env']
else:
env = {}
env = dict(env, HTTP_HOST=host, REQUEST_METHOD=method, PATH_INFO=path, QUERY_STRING=query, HTTPS=str(https))
headers = headers or {}
for k, v in headers.items():
env['HTTP_' + k.upper().replace('-', '_')] = v
if 'HTTP_CONTENT_LENGTH' in env:
env['CONTENT_LENGTH'] = env.pop('HTTP_CONTENT_LENGTH')
if 'HTTP_CONTENT_TYPE' in env:
env['CONTENT_TYPE'] = env.pop('HTTP_CONTENT_TYPE')
if method not in ["HEAD", "GET"]:
data = data or ''
if isinstance(data, dict):
q = urlencode(data)
else:
q = data
env['wsgi.input'] = BytesIO(q.encode('utf-8'))
if 'CONTENT_LENGTH' not in env:
#if not env.get('CONTENT_TYPE', '').lower().startswith('multipart/') and 'CONTENT_LENGTH' not in env:
env['CONTENT_LENGTH'] = len(q)
response = web.storage()
def start_response(status, headers):
response.status = status
response.headers = dict(headers)
response.header_items = headers
data = self.wsgifunc()(env, start_response)
response.data = b"".join(data)
return response
def browser(self):
return browser.AppBrowser(self)
def handle(self):
fn, args = self._match(self.mapping, web.ctx.path)
return self._delegate(fn, self.fvars, args)
def handle_with_processors(self):
def process(processors):
try:
if processors:
p, processors = processors[0], processors[1:]
return p(lambda: process(processors))
else:
return self.handle()
except web.HTTPError:
raise
except (KeyboardInterrupt, SystemExit):
raise
except:
print(traceback.format_exc(), file=web.debug)
raise self.internalerror()
# processors must be applied in the reverse order. (??)
return process(self.processors)
def wsgifunc(self, *middleware):
"""Returns a WSGI-compatible function for this application."""
def peep(iterator):
"""Peeps into an iterator by doing an iteration
and returns an equivalent iterator.
"""
# wsgi requires the headers first
# so we need to do an iteration
# and save the result for later
try:
firstchunk = next(iterator)
except StopIteration:
firstchunk = ''
return itertools.chain([firstchunk], iterator)
def wsgi(env, start_resp):
# clear thread-local state to avoid interference from previous requests
self._cleanup()
self.load(env)
try:
# allow uppercase methods only
if web.ctx.method.upper() != web.ctx.method:
raise web.nomethod()
result = self.handle_with_processors()
if is_iter(result):
result = peep(result)
else:
result = [result]
except web.HTTPError as e:
result = [e.data]
def build_result(result):
for r in result:
if PY2:
yield utils.safestr(r)
else:
if isinstance(r, bytes):
yield r
elif isinstance(r, string_types):
yield r.encode('utf-8')
else:
yield str(r).encode('utf-8')
result = build_result(result)
status, headers = web.ctx.status, web.ctx.headers
start_resp(status, headers)
def cleanup():
self._cleanup()
yield b'' # force this function to be a generator
return itertools.chain(result, cleanup())
for m in middleware:
wsgi = m(wsgi)
return wsgi
def run(self, *middleware):
"""
Starts handling requests. If called in a CGI or FastCGI context, it will follow
that protocol. If called from the command line, it will start an HTTP
server on the port named in the first command line argument, or, if there
is no argument, on port 8080.
`middleware` is a list of WSGI middleware which is applied to the resulting WSGI
function.
"""
return wsgi.runwsgi(self.wsgifunc(*middleware))
def stop(self):
"""Stops the http server started by run.
"""
if httpserver.server:
httpserver.server.stop()
httpserver.server = None
def cgirun(self, *middleware):
"""
Return a CGI handler. This is mostly useful with Google App Engine.
There you can just do:
main = app.cgirun()
"""
wsgiapp = self.wsgifunc(*middleware)
try:
from google.appengine.ext.webapp.util import run_wsgi_app
return run_wsgi_app(wsgiapp)
except ImportError:
# we're not running from within Google App Engine
return wsgiref.handlers.CGIHandler().run(wsgiapp)
def gaerun(self, *middleware):
"""
Starts the program in a way that will work with Google app engine,
no matter which version you are using (2.5 / 2.7)
If it is 2.5, just normally start it with app.gaerun()
If it is 2.7, make sure to change the app.yaml handler to point to the
global variable that contains the result of app.gaerun()
For example:
in app.yaml (where code.py is where the main code is located)
handlers:
- url: /.*
script: code.app
Make sure that the app variable is globally accessible
"""
wsgiapp = self.wsgifunc(*middleware)
try:
# check what version of python is running
version = sys.version_info[:2]
major = version[0]
minor = version[1]
if major != 2:
raise EnvironmentError("Google App Engine only supports python 2.5 and 2.7")
# if 2.7, return a function that can be run by gae
if minor == 7:
return wsgiapp
# if 2.5, use run_wsgi_app
elif minor == 5:
from google.appengine.ext.webapp.util import run_wsgi_app
return run_wsgi_app(wsgiapp)
else:
raise EnvironmentError("Not a supported platform, use python 2.5 or 2.7")
except ImportError:
return wsgiref.handlers.CGIHandler().run(wsgiapp)
def load(self, env):
"""Initializes ctx using env."""
ctx = web.ctx
ctx.clear()
ctx.status = '200 OK'
ctx.headers = []
ctx.output = ''
ctx.environ = ctx.env = env
ctx.host = env.get('HTTP_HOST')
if env.get('wsgi.url_scheme') in ['http', 'https']:
ctx.protocol = env['wsgi.url_scheme']
elif env.get('HTTPS', '').lower() in ['on', 'true', '1']:
ctx.protocol = 'https'
else:
ctx.protocol = 'http'
ctx.homedomain = ctx.protocol + '://' + env.get('HTTP_HOST', '[unknown]')
ctx.homepath = os.environ.get('REAL_SCRIPT_NAME', env.get('SCRIPT_NAME', ''))
ctx.home = ctx.homedomain + ctx.homepath
#@@ home is changed when the request is handed off to a sub-application.
#@@ but the real home is required for doing absolute redirects.
ctx.realhome = ctx.home
ctx.ip = env.get('REMOTE_ADDR')
ctx.method = env.get('REQUEST_METHOD')
ctx.path = env.get('PATH_INFO')
# http://trac.lighttpd.net/trac/ticket/406 requires:
if env.get('SERVER_SOFTWARE', '').startswith('lighttpd/'):
ctx.path = lstrips(env.get('REQUEST_URI').split('?')[0], ctx.homepath)
# Apache and CherryPy webservers unquote the url but lighttpd doesn't.
# unquote explicitly for lighttpd to make ctx.path uniform across all servers.
ctx.path = unquote(ctx.path)
if env.get('QUERY_STRING'):
ctx.query = '?' + env.get('QUERY_STRING', '')
else:
ctx.query = ''
ctx.fullpath = ctx.path + ctx.query
for k, v in iteritems(ctx):
# convert all string values to unicode values and replace
# malformed data with a suitable replacement marker.
if isinstance(v, bytes):
ctx[k] = v.decode('utf-8', 'replace')
# status must always be str
ctx.status = '200 OK'
ctx.app_stack = []
def _delegate(self, f, fvars, args=[]):
def handle_class(cls):
meth = web.ctx.method
if meth == 'HEAD' and not hasattr(cls, meth):
meth = 'GET'
if not hasattr(cls, meth):
raise web.nomethod(cls)
tocall = getattr(cls(), meth)
return tocall(*args)
if f is None:
raise web.notfound()
elif isinstance(f, application):
return f.handle_with_processors()
elif isclass(f):
return handle_class(f)
elif isinstance(f, string_types):
if f.startswith('redirect '):
url = f.split(' ', 1)[1]
if web.ctx.method == "GET":
x = web.ctx.env.get('QUERY_STRING', '')
if x:
url += '?' + x
raise web.redirect(url)
elif '.' in f:
mod, cls = f.rsplit('.', 1)
mod = __import__(mod, None, None, [''])
cls = getattr(mod, cls)
else:
cls = fvars[f]
return handle_class(cls)
elif hasattr(f, '__call__'):
return f()
else:
return web.notfound()
def _match(self, mapping, value):
for pat, what in mapping:
if isinstance(what, application):
if value.startswith(pat):
f = lambda: self._delegate_sub_application(pat, what)
return f, None
else:
continue
elif isinstance(what, string_types):
what, result = utils.re_subm(r'^%s\Z' % (pat,), what, value)
else:
result = utils.re_compile(r'^%s\Z' % (pat,)).match(value)
if result: # it's a match
return what, [x for x in result.groups()]
return None, None
def _delegate_sub_application(self, dir, app):
"""Deletes request to sub application `app` rooted at the directory `dir`.
The home, homepath, path and fullpath values in web.ctx are updated to mimic request
to the subapp and are restored after it is handled.
@@ Any issues when used with yield?
"""
web.ctx._oldctx = web.storage(web.ctx)
web.ctx.home += dir
web.ctx.homepath += dir
web.ctx.path = web.ctx.path[len(dir):]
web.ctx.fullpath = web.ctx.fullpath[len(dir):]
return app.handle_with_processors()
def get_parent_app(self):
if self in web.ctx.app_stack:
index = web.ctx.app_stack.index(self)
if index > 0:
return web.ctx.app_stack[index-1]
def notfound(self):
"""Returns HTTPError with '404 not found' message"""
parent = self.get_parent_app()
if parent:
return parent.notfound()
else:
return web._NotFound()
def internalerror(self):
"""Returns HTTPError with '500 internal error' message"""
parent = self.get_parent_app()
if parent:
return parent.internalerror()
elif web.config.get('debug'):
return debugerror()
else:
return web._InternalError()
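# Illustrative sketch, not part of the original source: the callable returned by
# wsgifunc() is a plain WSGI application, so it can be handed to any WSGI server
# instead of using run(). The `app` argument stands for an application instance.
def _example_serve_with_wsgiref(app):
    from wsgiref.simple_server import make_server
    server = make_server("0.0.0.0", 8080, app.wsgifunc())
    server.serve_forever()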
def with_metaclass(mcls):
def decorator(cls):
body = vars(cls).copy()
# clean out class body
body.pop('__dict__', None)
body.pop('__weakref__', None)
return mcls(cls.__name__, cls.__bases__, body)
return decorator
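# Illustrative sketch, not part of the original source: with_metaclass sidesteps the
# differing Py2 (__metaclass__) and Py3 (metaclass=...) syntaxes; auto_application
# below relies on it for its `page` base class. _ExampleMeta/_ExamplePage are made up
# for this example only.
def _example_with_metaclass():
    class _ExampleMeta(type):
        pass

    @with_metaclass(_ExampleMeta)
    class _ExamplePage(object):
        pass

    assert type(_ExamplePage) is _ExampleMeta  # the class is rebuilt by the metaclass
    return _ExamplePage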
class auto_application(application):
"""Application similar to `application` but urls are constructed
automatically using metaclass.
>>> app = auto_application()
>>> class hello(app.page):
... def GET(self): return "hello, world"
...
>>> class foo(app.page):
... path = '/foo/.*'
... def GET(self): return "foo"
>>> app.request("/hello").data
b'hello, world'
>>> app.request('/foo/bar').data
b'foo'
"""
def __init__(self):
application.__init__(self)
class metapage(type):
def __init__(klass, name, bases, attrs):
type.__init__(klass, name, bases, attrs)
path = attrs.get('path', '/' + name)
# path can be specified as None to ignore that class
# typically required to create an abstract base class.
if path is not None:
self.add_mapping(path, klass)
@with_metaclass(metapage) # little hack needed for Py2 and Py3 compatibility
class page():
path = None
self.page = page
# The application class already has the required functionality of subdir_application
subdir_application = application
class subdomain_application(application):
"""
Application to delegate requests based on the host.
>>> urls = ("/hello", "hello")
>>> app = application(urls, globals())
>>> class hello:
... def GET(self): return "hello"
>>>
>>> mapping = (r"hello\.example\.com", app)
>>> app2 = subdomain_application(mapping)
>>> app2.request("/hello", host="hello.example.com").data
b'hello'
>>> response = app2.request("/hello", host="something.example.com")
>>> response.status
'404 Not Found'
>>> response.data
b'not found'
"""
def handle(self):
host = web.ctx.host.split(':')[0] #strip port
fn, args = self._match(self.mapping, host)
return self._delegate(fn, self.fvars, args)
def _match(self, mapping, value):
for pat, what in mapping:
if isinstance(what, string_types):
what, result = utils.re_subm('^' + pat + '$', what, value)
else:
result = utils.re_compile('^' + pat + '$').match(value)
if result: # it's a match
return what, [x for x in result.groups()]
return None, None
def loadhook(h):
"""
Converts a load hook into an application processor.
>>> app = auto_application()
>>> def f(): "something done before handling request"
...
>>> app.add_processor(loadhook(f))
"""
def processor(handler):
h()
return handler()
return processor
def unloadhook(h):
"""
Converts an unload hook into an application processor.
>>> app = auto_application()
>>> def f(): "something done after handling request"
...
>>> app.add_processor(unloadhook(f))
"""
def processor(handler):
try:
result = handler()
is_gen = is_iter(result)
except:
# run the hook even when handler raises some exception
h()
raise
if is_gen:
return wrap(result)
else:
h()
return result
def wrap(result):
def next_hook():
try:
return next(result)
except:
# call the hook at the end of the iterator
h()
raise
result = iter(result)
while True:
yield next_hook()
return processor
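# Illustrative sketch, not part of the original source: a matched loadhook/unloadhook
# pair that times every request. The `app` argument stands for an application
# instance; web.debug is web.py's helper for writing debug output.
def _example_timing_hooks(app):
    import time
    def start_timer():
        web.ctx.timer_start = time.time()
    def report_time():
        web.debug("handled %s in %.3f seconds" % (web.ctx.path, time.time() - web.ctx.timer_start))
    app.add_processor(loadhook(start_timer))
    app.add_processor(unloadhook(report_time))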
def autodelegate(prefix=''):
"""
Returns a method that takes one argument and calls the method named prefix+arg,
calling `notfound()` if there isn't one. Example:
urls = ('/prefs/(.*)', 'prefs')
class prefs:
GET = autodelegate('GET_')
def GET_password(self): pass
def GET_privacy(self): pass
`GET_password` would get called for `/prefs/password` while `GET_privacy`
gets called for `/prefs/privacy`.
If a user visits `/prefs/password/change` then `GET_password(self, '/change')`
is called.
"""
def internal(self, arg):
if '/' in arg:
first, rest = arg.split('/', 1)
func = prefix + first
args = ['/' + rest]
else:
func = prefix + arg
args = []
if hasattr(self, func):
try:
return getattr(self, func)(*args)
except TypeError:
raise web.notfound()
else:
raise web.notfound()
return internal
class Reloader:
"""Checks to see if any loaded modules have changed on disk and,
if so, reloads them.
"""
"""File suffix of compiled modules."""
if sys.platform.startswith('java'):
SUFFIX = '$py.class'
else:
SUFFIX = '.pyc'
def __init__(self):
self.mtimes = {}
def __call__(self):
for mod in sys.modules.values():
self.check(mod)
def check(self, mod):
# jython registers java packages as modules but they either
# don't have a __file__ attribute or its value is None
if not (mod and hasattr(mod, '__file__') and mod.__file__):
return
try:
mtime = os.stat(mod.__file__).st_mtime
except (OSError, IOError):
return
if mod.__file__.endswith(self.__class__.SUFFIX) and os.path.exists(mod.__file__[:-1]):
mtime = max(os.stat(mod.__file__[:-1]).st_mtime, mtime)
if mod not in self.mtimes:
self.mtimes[mod] = mtime
elif self.mtimes[mod] < mtime:
try:
reload(mod)
self.mtimes[mod] = mtime
except ImportError:
pass
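# Illustrative sketch, not part of the original source: Reloader is an ordinary
# callable, so it can also be driven by hand (the application constructor above wires
# it in as a loadhook when autoreload is enabled).
def _example_manual_reload():
    reloader = Reloader()
    reloader()   # stats every module in sys.modules and reloads the ones that changed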
if __name__ == "__main__":
import doctest
doctest.testmod()
webpy/web/py3helpers.py 0000644 0001750 0001750 00000001555 13146625266 013606 0 ustar wmb wmb """Utilities for make the code run both on Python2 and Python3.
"""
import sys
PY2 = sys.version_info[0] == 2
# urljoin
if PY2:
from urlparse import urljoin
else:
from urllib.parse import urljoin
# Dictionary iteration
if PY2:
iterkeys = lambda d: d.iterkeys()
itervalues = lambda d: d.itervalues()
iteritems = lambda d: d.iteritems()
else:
iterkeys = lambda d: iter(d.keys())
itervalues = lambda d: iter(d.values())
iteritems = lambda d: iter(d.items())
# string and text types
if PY2:
text_type = unicode
string_types = (str, unicode)
numeric_types = (int, long)
else:
text_type = str
string_types = (str,)
numeric_types = (int,)
if PY2:
is_iter = lambda x: x and hasattr(x, 'next')
else:
is_iter = lambda x: x and hasattr(x, '__next__')
# imap
if PY2:
from itertools import imap
else:
imap = map
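# Illustrative sketch, not part of the original source: writing version-neutral code
# with the helpers above. The dictionary is a throwaway value for demonstration only.
def _example_portable_iteration():
    d = {"a": 1, "b": 2}
    for key, value in iteritems(d):        # same spelling on Python 2 and Python 3
        assert isinstance(key, string_types) and isinstance(value, numeric_types)
    return list(imap(text_type, d))        # imap is lazy on both versions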