vhost-0.10.0/.cargo_vcs_info.json0000644000000001430000000000100122530ustar { "git": { "sha1": "c6c936d8a7839d6865452055ff9b7c83fb13f4c5" }, "path_in_vcs": "vhost" }vhost-0.10.0/CHANGELOG.md000064400000000000000000000074601046102023000126650ustar 00000000000000# Changelog ## [Unreleased] ### Added ### Changed ### Fixed ### Deprecated ## [0.10.0] ### Changed - [[#219]](https://github.com/rust-vmm/vhost/pull/219) Update vmm-sys-util dependency to 0.12.1. ### Remove - [[#202](https://github.com/rust-vmm/vhost/pull/202)] Do not expose for internal-usage-only `NOOP` and `MAX_CMD` requests. - [[#205](https://github.com/rust-vmm/vhost/pull/205)] Remove some commented out code. ### Fixed - [[#208](https://github.com/rust-vmm/vhost/pull/208)] Fix various message structs being `repr(Rust)` instead of `repr(C)`. ## [0.9.0] ### Changed - [[#187]](https://github.com/rust-vmm/vhost/pull/187) Clean master slave - Replaced master/slave with frontend/backend in the codebase and public API. - Replaced master/slave with frontend/backend in the crate features. - Updated dependency bitflags from 1.0 to 2.4 - [[#116]](https://github.com/rust-vmm/vhost/pull/116) Upgrade to 2021 edition ### Fixed - [[#184]](https://github.com/rust-vmm/vhost/pull/184) Safety fixes - [[#186]](https://github.com/rust-vmm/vhost/pull/186) vhost: Fix clippy warnings. 
## [0.8.1] ### Fixed - [[#175]](https://github.com/rust-vmm/vhost/pull/175) vhost: Always enable vm-memory/backend-mmap ## [0.8.0] ### Added - [[#169]](https://github.com/rust-vmm/vhost/pull/160) vhost: Add xen memory mapping support ### Fixed - [[#165]](https://github.com/rust-vmm/vhost/pull/165) vhost: vdpa: Provide custom set_vring_addr() implementation - [[#172]](https://github.com/rust-vmm/vhost/pull/172) Vhost user fix ## [0.7.0] ### Added - [[#137]](https://github.com/rust-vmm/vhost/pull/137) vhost_user: add Error::Disconnected ### Changed - Updated dependency vm-memory 0.10.0 to 0.11.0 ### Fixed - [[#135]](https://github.com/rust-vmm/vhost/pull/135) vhost_user: fix UB on invalid master request - [[#136]](https://github.com/rust-vmm/vhost/pull/136) vhost_user: fix unsound send_message functions - [[#153]](https://github.com/rust-vmm/vhost/pull/153) Fix set_vring_addr issues ### Deprecated ## [0.6.0] ### Upgraded - vm-memory from 0.9 to 0.10 - vmm-sys-util from 0.10 to 0.11 ## [0.5.0] ### Changed - [[#113]](https://github.com/rust-vmm/vhost/pull/113) Improved error messages. - [[#115]](https://github.com/rust-vmm/vhost/pull/115) Use caret requirements for dependencies. 
## [v0.4.0] ### Added - [[#109]](https://github.com/rust-vmm/vhost/pull/109) vhost_kern: vdpa: Override the implementation of valid() ### Fixed - [[#102]](https://github.com/rust-vmm/vhost/pull/102) Fix warnings and update test coverage - [[#104]](https://github.com/rust-vmm/vhost/pull/104) fix CODEOWNERS file - [[#107]](https://github.com/rust-vmm/vhost/pull/107) vhost_kern/vdpa: fix get_iova_range() ## [v0.3.0] ### Added - [[#92]](https://github.com/rust-vmm/vhost/pull/92) implement vhost_net backend - [[#97]](https://github.com/rust-vmm/vhost/pull/97) add method to restore Vdpa objects ### Changed - [[#90]](https://github.com/rust-vmm/vhost/pull/90) add vdpa and vhost-vdpa simple description - [[#90]](https://github.com/rust-vmm/vhost/pull/90) use vmm_sys_util::fam for vhost_vdpa_config - [[#95]](https://github.com/rust-vmm/vhost/pull/95) relax vm-memory dependency - [[#98]](https://github.com/rust-vmm/vhost/pull/98) generate documentation for doc.rs with all features enabled ### Fixed - [[#98]](https://github.com/rust-vmm/vhost/pull/98) fix a bug in SlaveReqHandler::set_config() which passes wrong configuration data to backend ### Deprecated - [[#90]](https://github.com/rust-vmm/vhost/pull/90) remove parse_iotlb_msg ## [v0.2.0] ### Added - [[#74]](https://github.com/rust-vmm/vhost/pull/74) Implement FromRawFd for Listener - [[#33]](https://github.com/rust-vmm/vhost/pull/33) Add vhost-vDPA support (in-kernel) ### Changed - [[#68]](https://github.com/rust-vmm/vhost/pull/68) Enforce ByteValued for received structs ## [v0.1.0] First release vhost-0.10.0/Cargo.toml0000644000000026550000000000100102630ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. 
# # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2021" name = "vhost" version = "0.10.0" authors = ["Liu Jiang "] description = "a pure rust library for vdpa, vhost and vhost-user" documentation = "https://docs.rs/vhost" readme = "README.md" keywords = [ "vhost", "vhost-user", "virtio", "vdpa", ] license = "Apache-2.0 OR BSD-3-Clause" repository = "https://github.com/rust-vmm/vhost" [package.metadata.docs.rs] all-features = true [dependencies.bitflags] version = "2.4" [dependencies.libc] version = "0.2.39" [dependencies.vm-memory] version = "0.14.0" features = ["backend-mmap"] [dependencies.vmm-sys-util] version = "0.12.1" [dev-dependencies.serial_test] version = "2.0" [dev-dependencies.tempfile] version = "3.2.0" [features] default = [] test-utils = [] vhost-kern = [] vhost-net = ["vhost-kern"] vhost-user = [] vhost-user-backend = ["vhost-user"] vhost-user-frontend = ["vhost-user"] vhost-vdpa = ["vhost-kern"] vhost-vsock = [] xen = ["vm-memory/xen"] vhost-0.10.0/Cargo.toml.orig000064400000000000000000000015411046102023000137350ustar 00000000000000[package] name = "vhost" version = "0.10.0" keywords = ["vhost", "vhost-user", "virtio", "vdpa"] description = "a pure rust library for vdpa, vhost and vhost-user" authors = ["Liu Jiang "] repository = "https://github.com/rust-vmm/vhost" documentation = "https://docs.rs/vhost" readme = "README.md" license = "Apache-2.0 OR BSD-3-Clause" edition = "2021" [package.metadata.docs.rs] all-features = true [features] default = [] test-utils = [] vhost-vsock = [] vhost-kern = [] vhost-vdpa = ["vhost-kern"] vhost-net = ["vhost-kern"] vhost-user = [] vhost-user-frontend = ["vhost-user"] vhost-user-backend = ["vhost-user"] xen = ["vm-memory/xen"] [dependencies] bitflags = "2.4" libc = "0.2.39" vmm-sys-util = "0.12.1" vm-memory = { version = "0.14.0", features=["backend-mmap"] } 
[dev-dependencies] tempfile = "3.2.0" serial_test = "2.0" vhost-0.10.0/README.md000064400000000000000000000046261046102023000123340ustar 00000000000000# vHost A pure rust library for vDPA, vhost and vhost-user. The `vhost` crate aims to help implementing dataplane for virtio backend drivers. It supports three different types of dataplane drivers: - vhost: the dataplane is implemented by linux kernel - vhost-user: the dataplane is implemented by dedicated vhost-user servers - vDPA(vhost DataPath Accelerator): the dataplane is implemented by hardwares The main relationship among Traits and Structs exported by the `vhost` crate is as below: ![vhost Architecture](/docs/vhost_architecture.png) ## Kernel-based vHost Backend Drivers The vhost drivers in Linux provide in-kernel virtio device emulation. Normally the hypervisor userspace process emulates I/O accesses from the guest. Vhost puts virtio emulation code into the kernel, taking hypervisor userspace out of the picture. This allows device emulation code to directly call into kernel subsystems instead of performing system calls from userspace. The hypervisor relies on ioctl based interfaces to control those in-kernel vhost drivers, such as vhost-net, vhost-scsi and vhost-vsock etc. ## vHost-user Backend Drivers The [vhost-user protocol](https://qemu.readthedocs.io/en/latest/interop/vhost-user.html#communication) aims to implement vhost backend drivers in userspace, which complements the ioctl interface used to control the vhost implementation in the Linux kernel. It implements the control plane needed to establish virtqueue sharing with a user space process on the same host. It uses communication over a Unix domain socket to share file descriptors in the ancillary data of the message. The protocol defines two sides of the communication, frontend and backend. Frontend is the application that shares its virtqueues, backend is the consumer of the virtqueues. Frontend and backend can be either a client (i.e. 
connecting) or server (listening) in the socket communication. ## Xen support Supporting Xen requires special handling while mapping the guest memory. The `vm-memory` crate implements xen memory mapping support via a separate feature `xen`, and this crate uses the same feature name to enable Xen support. Also, for xen mappings, the memory regions passed by the frontend contains few extra fields as described in the vhost-user protocol documentation. It was decided by the `rust-vmm` maintainers to keep the interface simple and build the crate for either standard Unix memory mapping or Xen, and not both. vhost-0.10.0/docs/vhost_architecture.drawio000064400000000000000000000471761046102023000171300ustar 00000000000000 vhost-0.10.0/docs/vhost_architecture.png000064400000000000000000004352321046102023000164210ustar 00000000000000PNG  IHDR-EiCCPICC Profile(c``I,(aa``+) rwRR` ͠`\\TQk .Ȭ"̒OO.h ّ艩p'?@\PTd+@HQ@;b'AGjB@@rF" @Nx:j/({p.$D;Teg(8C)U3/YOGȐ՟oÒQ!8(``Ap+BL![s$%8: {100_VeXIfMM*iDASCIIScreenshot qiTXtXML:com.adobe.xmp 1279 Screenshot 651 @IDATx E ưˎYAAE"}_HQ'\+bqB>Z|hXmkfڣ\Kc#_VLzR2΁|'"PҸ!ưDJS7ϟ88rGI> @>6K疇O+iJ1ex̙RG=$@$@$@ULiӦUq Y4   J?RꨇH`h}-[f/_^iG$@$@$P_xqE"  4,YRiG$*>K$@$@$@$@$@$@$@$@$@ 4\&]z=ʵ+zlx .Wv_ qn/z.O:!@iJ\;rK^9@4pJ/YKO @ODHɥoqﻼ}o s~{i7dy1پl_ WNɅ8?dMlӔ.lqA>#}G9%o#MNȧ8[[=49۷vڗ~x;N#Tƛ Xj^{49 q~#MfؾeaA?@ޑ.oyQKO:!yVrK Xˍ#Z%>O4-wRDR?[KZ(8l4cyc ڷoQ{ZʟN<|lO?Ɛ}Y}Tqo3m۶-z8AWGoq?qиAǗޯ'O䃽w>?1~IDJS7䪖﷭[*Q0`9>}zўZr ~o׮]\vhE\MN @C-|06e!Y7 @;ۘ5$  !j@Iƿ4/E~5y#5ξ$MG|9DJS7e{|OD CKx>Qˏ$@$@$@E$lƌS[5eiIHHHH8蠃J؂$@%KUV]:R T;ϭbHHHHHHH ̚5`2 @mhv_ϟ??s`'N? 
G#BWA@8'miraѹODAPm  LmڴۣVNML|>N>[I#!}/E</N.EVGgh.G$EWXyW[&gA6WZ9k}ӞsCC^^ҹsg7guV|=m43c 3|XRCoA:O(|_:7zO:gҤIfѢEfȐ!5Ml|#H''"#_6ACC0y8~un:ui|Ѐ v{QОk,?}# :?i +*i?xأGC ݻsߟiNքHHR >f3 ڵkg@$@$@$Pί6f IHH@/$x$"@^,- d&\\d̓swver墫\=R_$+~ AWŽsEs gs|ziz49gzD9ƗB}HE^yHp;Mv_ Cm&q~;Ҋ G?GzλʩɩM}5ΚAi {O-C2w =FBԮ=GO?5poңO#'Y9!_#d#'ҋZV&p6HGG^ZzղƗ>^у>gO:) eN>QO')7{{rj雲Q&Gjy2Q DYV~hr?joIД|Ҟy'ڐ$@$@$@$@$@$@$@$@$Pw*K.fu"  (&йsgӡCJHHH~wر @SNvz > @ ~hgֲT/UDd8+])VC&v~S67!@:)TH^!$ΕI:%OyK ֯]5O)cL3Vd85=\tA'o~h9\3i诸_҆ɩ?"q#>O47I WNɅ8?|#2Lx+KiG t#u-qoɇ|z C{裏̌3L޽Q8hOʡZ5,,?(q.Γ|@|" :A#?zL:,Zl5gMS07_>i^WS'ߏࠍ ePx̣irjLbiύ<+Si(  @}Xj @{ۘ5O7'&֊HHHHHHHHj@Žvo߾H$@$@$@ / $@$@$@UC7ް[y@p ÇO}wb"һXpy7ܹsFmdc7xcV['I&Źjz_Hr*K.fŊe{þTq\KWNQҸD9O92j/WOmv!FW [Zr֟I1T&9Ҋ G?G.kٳ!-bv_y>CC'|0`}6/Rgnݺ,%#5A1 3@) k/TdVCrOzPD|9ȳG?䕋N>譵~~d:>~\Rh{P?/j?~v5 gyy䑱H+gH>nܸ8~[9!r 49hH{njK7M6VTnDozВݿʧ~qcz{iZo/`LoDs Ɓ4n\"~E>Hs~><+G|ݐգ~y"kO)q]4s1( 83g4dݻ-2#%I>"[Elj/7hg.Xjݶm[{l>49kOK/s#OVnj3 : @;ڴi_Q֐HH|;~֬Y ?Z;A?@$<*n[lY|yԆw%  X_x:'oF$@$@$4'YO=`/ @y C`.OXG%ï8 IDH`񯡡!uRCބHHHGd7b   &%СC3lذ=w"dytA }! K+GG%kɆӧR`l膐|qK!w)!<9`y{Rh~ qO>8s# Ρ~tܹ2jZyʛ},X+|.Y$1VSKߔjj꟥(j^z%An9C?~?cчo;f b??;wM׾}{3`g뭷6FǛL4:"}7l3{5nƊלh~mC'5gtlzi6tSӯ_?Ͻp!{˳>kfϞm qro=v1d[HT_ .4?pl믛ɓ'3 z 7X|m c}]}&Nj`'7N/%! >8AtdЫW/WÈV i%_ܗ }^9P΍|@|" :m!Ǘ΍|΁|'"PҸ!_S^fѵWNaODI2ru<6Gqo 0|ɱ̵ ^y2 ;찃55H/mE;[oe~_(>bpiC=Ԟ=-Zd kh 0]xᅦT~z3qD:\^X^xy䑠(aApM7|00{SOU q5'0?0w}_94ʼnFYBx9K?q/KZNSq˂kN$@$@$N|(=%cIHHjf3{Ks{m׮fm o7ȬDo[;˗3.2bŊD4 X XF  Pp%۷̸; >.I3Ʋyn,_Y֐HHZ쩷F%40.v޽{L0/lg-O30{W {]s5_e8LCe~8cW[Yv}ް47M"W_mnX>*(A\կ>r? X*# dSV˱1DzcGs'=0$b#̕W^<_\8>[oث^Cʕ+mdcD "Cypper墫\=R_ gs`X'??8T[;}@ݲ>O4/6r]Hp~>ĕO\M2I!Vd85=\tp?Ill;Z wH???Ϲk.>pثpb_7|3uQo{ w`&fwqq8'3Mc1 ~ M+rC1=~0C)bwNpX}D0/7pFq ao??yEvmg?|k=$d9\γHʢAkER !4Q҄ .\Q'>h|GZ?wZrhȧzlm_OٽʓW)hs>Cmj,0=ܲC?T`2PzfJ{L`rM݀墘]4g,م7qVx-7aO>pm}!sck1O<~H+ͧCˢ-縇Y`y!_X k| @>`PxD_}&Mb/SN9轏%ڃ:Ȧ/ э +b6x̀Z7qMۙR~xukfӻQ\={4w]󆆆Ȥq*.? 
VyrXI/,\;0+K/8у[mU )O:8y`OI4R~Q&*~S!Y=AjU7 @;ۘ5$ h>`_~ֿ>_ a/W“8ߡn$[LMLnBi̽7^{/QYnÍtӍz>a;R&z%ȱc^#ϱ1#F-(p'/؏/Kq?K !CҒe\ōuI"      F`NPqƙO<1A27 4`YfyuufqrT,=gisl8%`!<"k1f'ގaRc!;`H( +\ƿ,jFl?[F\xkF_1Je+]x${r^qKcj F>01 TxiC駄)S;zd}mCW_M pƑ&|C˭W=A'jr9Unϼu}?"@>j;P/+\~[p Qƻ5sϑIk@gIŵLn81͓(KD]X|}w5XG(GW[_~wyða 4o[V\KG,07Xѣ,`ug\bR#,_|[N\2Z8+G_e!ByP+άLV 1U%⋩nȢ?)R?Wy~ESK>]H]9 x2[K"Sl.M&~FC<'sq|ꕏO4yWN> +Zz0(|PxDNƩ&ϢQ4%U[~// )\q-u|g ]'^W #0 W^1Ǐ7/hn8q mFʉՃK+?IzQ4izO8By}s]M&>rc,ߗblip0oV~q3Eȣ?-=՟ȣny[n(CktJܹsg-K',5&CR.a`g   Y @8HqONRPwK":ӧO.ؖ]6.=%5:af;-||{߳|>lQ1;9ʐ43 K^d7guɼ,_ L%tPq{.CH2aK/q7oxfaz  :%wĦNj @%XOӬZsӮs%t`ȔdXzw^J0lI,uf {bv" ei3ӂg?YZrcnJCw?K?Ix] Pq_5Sz}vu~N9b?Y5BZj?\t`-HHHHHH'5wЄuIh 6(Z Or9#lG]~ "K.1x^{]^-X ۏǵKe1p[yE2_z뭄8M8^T/faeo#`.SpX |<מcK+'/}!/CE#mZroI1T&9Ҋ G?G,z,^k%pN PF2m|aF8|m*ڣAKz7|"7cʽx敓_u^ZzB淑{] _0ha[gF\y~^} f|衇o|Әf8k{G=9xI67xcU r?lN0[o,ʮO \r9 fϗdEGC변FVaX =U8Jte K(#HPO?>[Fe[(GW.yq_*g+k1vaĒ[lhM*1E!o7 `o>BLn N6QayA˃>(G-׿.)[wGנ %nsˏk-}^9tQΓ|@|" :m!Ǘέ^HqiD+yzB?a>25Q~'b?' GnOmjݕkTC~ @V~1PH~wRGtj{|  -__q]sy=,9LC6_,Ō4׀%;YcϞ=ƿٳg3;8}M FY|t 97?G[Oڥ\,оXxgI[QGevqG%ągz ^zW;bI1mjT/4$J3O*]@XCJߧ} ;޳Bbʽ%H2kGCk׮fΜ9V-g6EGɯ@)?1aƢ;3nA ӗ%a!HH T?m֐HHGJid9\g9(GnhLwؽ{c+Š>,0˜0 "GU| ]w7ߴ[=-N1L:|4+ed%0Raȑ]l mlIH? ZQWlꫯ,`Þn",D_cGlvWl=Ph?l9"V4ŸAc{&Ҿjf?^/Z7dF<0P3 r U ՘ n %e=2      h0R|#fʊ9w kz\9 fb=E7}0aY/r} n_pH?a!<~'m熼zܼ-TE``oE+GJ臅yo= #&B 0a`zƏp8 r]-840@J{m:/qp}GK-E3o<z$&G[kz49GD'OD4?qC.>&+,X ,|;d9|9#|'"s(e0*Vy)!0a|;6[/vhs8ATi7LrWN#[~ɣ+ƍ|'"΁|~R17hD#/{|!]Y%|O J 䓿Sxjz?w+#鹁?qUMS^ >iύ=!69~˭$@$@$@$@$@$@$@$@MC?,'Z ӎI&wM5; x)'zqu>Ɲ?[[DVЇ\_  )R%  ^|Wo۰d$Faʕ6^ʵd E48w`+V$dG7m)rS馛60 0^~q=DVXiDXqg{Z Wro9qXpke ? }X ?= R~/t|\Ⲧhr("BM.}wsW>m__hz7|!ԯDC{@>kiED"ñrT#T&J+2M.ڵ+do^Z959FⲶYS?4-a/ eH_AE#$Y ]Hq= 믿).| _gavE\5H#p{ƿ7xn(:?~Ǘa/^o}f{ge#r2$/kzM&s gZ+|oOD@WC>pIH?sPrKѣGA|~ү}ؽfZJɫ}kқgӸFws? 
},A/B䮓xwd!=G^9TZ?޺?x,Bdxgq86V8cJ߳gOvq,z8c@?B˓B5y' %9h Ht}AI㦕GKѣ_o(8"BM\||OD C~WAC^9KѣݗA@ođm)e<}A67|f%Cs>+ߌ3 Re]^oQvx y=+Z5K.5+6{⺒miӦMٟ{7 @M9s6mZM&  G?0_&&hv7a:R%UړrO>fM6$9bz-裏"ۛv%dPwR2P @m;ŵYxHHH LT{?2&& Pq_UԪB^1Oh$"Kܹr-oQMq{ fJWpH$@$@$@$@$@$@$@5A~1 k]" !Jʕrb- qw~ 3p:pO䗣aFĉ;u^9".7ޭm%K\9fFJK/yE!t r\F畋.yh8gc yƩ&~?~1{˖.rr}m3~o:Z#c"T+w܅t G۴ q?W~)cLs~zM&]Y}կSS?+{g?o+4a: {ի?/>$M8Џ'<EjW{챇馛ܤСC_7="[pCzWk~xuFnweHC:kz49G'<+pC^<FW>yQKO>IM:w+JY t{] G{C츮kVCӛ9A}ÿ}Wyhƿ86V#rэ#}o,XR /&LpsBx'cAu 06=U82sAKW]ԯ$ Aws#gs !j3:w+G_w)zR?8 >G/F7ی|DrqH{n@]5@W)è~/:HHHHHHHj@ōK7ڱz`;ښYwZw߽( K~۶m[$#g53 @O$@$@B{Μ9S.y$  c|qjuMҵ[`AKl7C9 6t%t]YcW_}ur]-OcXv_x~{]V?S 侍釱Y5 @m{ѢE@]HHH p){e @_x"Yx.ɚ^dMOQgAKO<ɇ|"8ȧ~RmJu/?HqkY)T+[Hp~>ĕO\M2I!Vd85=\teՓWVNMN U$.kj59@rP[dH+9ڋGk EHwKFZYgJHYr&sG>rs|+!dZ޼TK_~r}=q`GGک)m_gmߊ{ S/Ֆ散bY -B~7"'Dt_!&G.>~\|cPok-yQKݷگV߼Z惲#l AKO D\K_x6>}ޯkT Q94eI{n\Y7 @;ۘ5$  !jt^,- d&Pq<,]4sHHHv pڭKN$@$@$~@o֬YfUSAHHHG5 HHHjWMyBBq@K%0}t[wqϟVJ )E</ի"" ȇ|>GF> `+~/W?yZz DOW{37 C> G䫦./^x>̙cYϞ=ͦnjt=I>ߖ/_ny3i$3qD':t0z2iNQ_{^{ewnx7x|6w]3yd{.]: {J|ʕPv3@IDATN&L0SLu9s.]<{キzc/}Q?=xo+ol$u _+O^9QӞy߀ ,:wl}?`"ϣy×(Af/|mircaaދrhi ?E2?QkiGo^9:uE>GrC?C%Rc!{)1cƘ{ϖ +࣏>j`|dMl4oF\ ܹsao7~9[ɿwel2ӟԸݴ 8x8/_|(k>?S旿%# `֭9͠AT OHv;ha֜XW]ծ?_ۣGIP   &l껒 %Mˏ9眠/T6lysr&ODݰݻ~r>! 7|~y1.]ch> 1\KBrI>7:+t8+]:z q>7g^9GtZv쟑C D='C^>x#z)^Ȇ8jʩ?j/iPK\.[d8R?oiA'Z&RP$J+2M.ϫ_++6X.;lذ I?rH ~~DZarV6hp3fHǽ`4þ .?xFxJ9\8?#7Ma-0OA~{10x`;OҠ~_8>~(۶muGm};vmqꫯZ''p"qe8?qWI>8_+Rf ^dK\C1BHqk @Cz4ir!5@>яP?B&:zBq!elh"6HGG^ZzղƗ>^у>gO:) e0ǏwU:ztrDifnFY0~8QdԨQPP^w[oe i$W^i0{ .㱯 Kl<7C9:5C pF2tPT '`5YrYׂ~.5B  \ۋGHnB nF BzJ#Rh~ qO>8s# ΡƗ>;D=Vlߴy˩oG-84RxG-}f}7EL ɇe2;qnٳlW.:l pd{">榛ng(lv=y݀>p'c>4/?s2Ty=L8y(Kp񨣎%#;#yM~!ChZПSdODN       B;SQZ}`魻dV€?н4o}Zak7ODO6-q_`l,u@IX8,+v{d `Rf_F6;ҥYlYIHHi߾}%!  h2~a`3g&Gٙx.bvqG$Һ74Xb/<72ɓ'2%" QϏ<7KvװtҢ%Xpw,Ez/cZ?#Pq_N  @s{HHHe>f]wuv:(0w\3n8fzfw/2x>]Y5.`EAꧨ T/ҽ}+ŃKr^|b? 
ƍ|'"΁|'#2ŹH\1\dM~Q9hJUC2$q~9CiE^ӣEWzֵ:1cƘ|ni3X|]w*Cg^,q(XO"yݸ4>|q#_dD~8p w"RECݴg^)WvZ/eO+k>!tjtr墯\=2CI@># W.:R^=ZzO:!yVrۺV3w󶣖 tS9x}ٳ͋/h"4M8[_nJtzšeqw/nzIq~z_kYf>{/aTˮ'4p]wN4ܼY1пx+=ׂ~5B_dž{yW+ 4nQ9D4z3uTh"o#M^|敓O_yI(? rOψ@:_Sx6>SLb6X!C?{+f[-faĉ>`@,t|\\c "uF\H=C50NOPr#F_Z0uH x~zYkĕ[ZПSdOD2$T$@$@$@UL_D+\ʢ @'P ŋØ00a,bgF &IJ6,>lj7]R+o믿_r"\ S~#Pq%&      vf2bnj=:O`@UXn3$lKj%Ʃ/]邏N2 ,)WC\c<gf0E pذarYtg>,?sr,PNOy睗{MCH (5rQx}5R}HHHxo`^   !},WuAb?op0c Iׇ{'aʗGelI3 2qp[$DO?3gNBc2  $5uQqo]t %YK?y9h?'OD O`^ԭ\=G+O^9DFF>:ab$q去nH|+hKCe8C(pkz4ʪk׮Ȓyk9_}5sXf?K{>l"F:3``o4bx.I+J^uM.qȑ/Hz)$ d=7vXQag1_ ݑGit$Àx^E6&M=q$Bһ(ȅrmH{_vRfIo Y _ȋJ\E۝ǑsиE8' M΍|G8 hz8-Nx_|v/s>$G8"_n8:yYgegs2ӦM3zA6/x@4MsqK^yhkA_wueV/nҽņ|@|" :A#?z"~E>HOKwVr0uCWA@8 >G/$c1=X [oSN9`-bE" _:coF0`𩧞Oƌc8a/6=z(zal!4VC2ymy7]UCG71FK00{ls%K׫u袋~ZyQH9qGi 9+dOD2Ì3LuN$@$@$P%]&SEeHHH$P }C?>wp1|;30dþuvX駟n`X  0:BoS͛拓3,믿Nھ`}e3*) 7h&$@$@$@ hѢ限HHHv ,],^+0h K^8*9s =-= dX{UWerfe]w95vVٱcG2nVd!/5KRfw6YG8{1;:x4裹iR+ ?gf.i͝g,(ueɑ KƧK.fxKdIt^ @~w/K/d>#kY/0@Hx8򀃐#F$ǦoL<ټ{V/fd?m9^l=X|ym'"t::BX@ݰ@ō蔍Y KG-/]jƼ1,^laMA_L[1,Xtk׭B=[XA  h!0 >k'P 6-aorZ;b?qEz]K]\4L>VDcBr ;uꔐASy0ؽg)zP?8>'$3i$s^7 ZwOƙs뽻;v(ymh} a=>?#se{Ѿo䕓w]>,X+|b+jqz|i+'R#UQeE_+&gAQ94eI{n\Y b,!9K'!2<|YH#'s;oyOӣ}# "0H:\΅w__1K.! )/{^C6y8~unGbA?@47hS4#?IK>57ufkh^Wջa̋3^4Ld]ݒ!     @ƿyY$$@DO4[vwf*o[o6n9fc3>V HHH _m'y…I":!̀ ImXj!0@ɲO8,:"><1 ?fϞ]e I4\VI6(kgH.2V$EWzYOqϡ# z|'x+)Dcp? ߣB|v5}Yƅ e#qK/~RYg ͥ:K+BCu~}5\tA2sI}:  ^Fk{Q41MB䢏E>_Q<@E x DR?[-ǔ{Z#eeЅ@-跻Z?([vԳ QQKAӣɩ#_ 5'!y֭ LV gڶm!qHO)rY<z{!kfh1@S@ߒמyO=cplhh|'"!Ҿy΍|@||4_m -G>>i 6!Z>qc1 ?뿍YC  | IJW[Ғ @f7-Y,]4sHHHv pڭKN$@$@$Q1! T{1c5ktM%Y ^CQh49s>QI(}-k?Q;h49r!n]gΜi-Zd:u@Z95ys_+O^95ƍ|I $FHpy>}C'U8"O+DŽrjrO" :Z?RviX ɐޕoYGBR6#7Y7|M0}Ӯ{!ݠ>7m Xaފş/3?߰G17MxԬ*|ݱ!G5ٽ^nK*W/I&'tnSkG rC~pknQZ*.,?DAkGM.M&w|YgŽ nT? 
*MFs(ދ /uh[jb/kZʌvѦ渧100s{Oٲ5/<̓̌= KV.7#ZPƝ 4H++[rdgNl珌rW?rh$y rG1|֯Rj d}#mKi_U皜|@@熸ҞyT|Ͽ=znݺ $@$` 0M^h5*KX/V v/}~fʽP r"z-4yfu`e͸O0,nYjEų9eE, >_b49ͧ5g'z5c{2mĻoq25od(0 ~̂< &P0M(2hݻw/InD$@$@$@N-P, @M3Av HH`\wFcz6p/+ۮ`{`\{+6g vUa/1na#.0̍ fG\eňѿkNw0r5]6v&Cfp{??eV? n#AFߍ<ƺ#ۛ=珸*!ߵ?q/̞N3=ti,G;̌TkAd6naln)[s^Ŗo%g $w~Sҥn  .|WW{4$@ge1ӑ ]t4j8dtGQߞ1l5`a@?x^_|f/vUk۪s7grq/KqY9Y~uK_?aP;v7[mydfIXgCXyHkc7`ӝZ߭<onnx~[aݷ0{֦kߺY*0=?~/ ooA$@$@$@$@$@$@ PqoV]"x()W.գA@o/#ՖϡV4w9<+f{Oݶw;yEwcԹ}auoͼ2{;F=1;߲WfM2{ Zl0_HÞ6߯5 ε:6a W,1ʀ}pg?x%XF>|.60"lWE_88N_oΙj9ymYw5|Σv3ش?,L{|?~ʚ~[{* K޳?Wyƍ|ꗏy%q-2Ϸ q?W~)cLs~zM&]YཏYyk-ʅ]ieR{O̬YL^P87:䕋rhsu>߇ )shOY|/bY~}vg.Y{00m[#M/ϙbgh"4>%8a a\[u#;<]q'afYUr6z3Pg6 ?/Oǧ:P.͊+A_GSB:܁pfѢEfmIt=9%3O?Z+gcʽr ]YK]_OZiO?,Y 4(xk>kZ95Tm+ȴyиO`B?'v=G..kauʿqY'A.47=s{[p'%06E;w0XB^X|x]mUkOf!`u=?eWO?jq&GZx?cӼj+g !1j?Rj+?oQ 堥gF/j+~ ?+'3=$#k,xGBGXްi60Xs,-&|Wm:Ž|on摃/r,ž0p#kX:ߦs.}Ue,mˋ]mP-G]n~ ( KQpo[%'RNy<\Xb,{ ֡Ņ%0si^63?HHHHHHZ${ܹY,"$@*x]z_/z_Zvvfu+s[=L-f㶝wi,xWƘy+Vunܗvks#Qf.itt3t,,/c<ӂP"=NX^솁z}5v!H J-]s7?v\QprЯnq7-@+ roExUkϓ 3RH;cz-e߯0/sF&իkcz֊ ~ZohG2I~iԫqgƦv!]iB!y۫NAbM^69# E/lXWJWBzPX#rC'r|E ,LME[Lޱ殭d c6?N"%>2U^]t1;vtJB?w._*>^kP{i+xM X6[J1=X"*ޗ@RlAR2O hέo!ϏKP )FRdc gz֊ms}Ʒ Y¸B2 GRP{!GH/%eo1T{Y!^=94= yѵJY+j/r?:ז&w P4~Sh+){Ԟqws|}0-x|84CWyōUT7HHZ6 `^嫾ew֞HHHHh>q͘ϟ??NY~C/. }^9P΍|@|" :\8UwB编_!>]w5m^hgΜi|m1] x^4}MXX´)PV^W3}N'X&Ӝ9s̊+.WD2W񛷽1WJ`謁y>7 L1$Uyq<]=+*^?jȥ=$wso+2yo ɱl`8p$ǐD#=KѣݗA@ođQa& T@g>?P}/xkCrq(E`ϻB"L:ۮ\?2EiE.i[hy = r eG$W 5!M҄QP8Bϋ$!Z{_^Erޢ?K{z_Ѡu1BZ)?%\PNrI\8o/$KګH#o%z,lWp5^`*8|B q禑rgi/,﯇~xb:tx& _SLK =&^Ei/Ws|+r䍴?K{, i򃕸ix,{!NX8 @ެ+~ҭIHH1M?Ï@$@$@$o؂2Ǵ5R*HHH|SWA $@$@$@$@$@M1OV"X?   *"пiʂwz $@$@$@O[.l!@$&0 jNiʕdY+R\dHs7Fmd)2IcrUA`F9z! 
$(luhHs)ħ?՛6WFy%-YyJ?}׮]Y⊷HyDJ?"F}N\1\dM~Q9hJUC2$q~9CiE^ӣEWV=yykqoLN I/V vio#\KCr?,ʑY>h|"Z3KW.<@E,YĴmՖ56[&9E2-}^9G4nC>pIHY:>~\Rh{P?ү}ؽfZJɫ}kқgWY0a\cZk?~^:B*%!;E#d!=HWN sC6l<\ʥfmQb~@^k] Z?+G|uW v_AK_ ~-}^9|ҾZzG1|I,ӐO8"ơIڅ6p ,H'$O{CWVk W̙3ͬY**H~36gz0O<3GWqu筛.$@$@$@Jo&ͫ,g,K.5˖-+_5 4)+]aWMwnދ?iNw=ll-nǦ)<-jJ$@$@$PU_dIU!"0JƿX3OH4s9=1G?ط\0oԥGsݾj;w\O;߿dǟK]jO^mqYqaOD_}Tլ52kgLd-Z +V/Wx3>} w\wd   z 8@}< M6~깸LaUy-@~&qsIy(Vz_1V}6sq/8WvRn٢2l^UWROVzBz_G7ȟeL) p.\GzKW=e_4Iڪ%0>T< j_P!waG?1}1vi>=*GZv|u[x{j~m+V DZMrDz'N̜9,Ҩ|5-kGmTK0vÆ_dw]ӻ\3۬Z4M3iB\.FU.G8>>?f Z߲ӒʿXru|z/~lG=/O2o&eixrQeXr=/j)HK!9Q'BwWuߧ~˪?Rz8 $eF݉E uH;"KD0pG\geV{JZE*V y|*jv@gY|l`ܖ9Ьy*} ($V D9DFhtp0,3J1u!0UzO|DxݯD 6oh"@ nZF5q-B';oPZCFM6_#6ϮAi" w\D}x1giDDuLÓDTe_cYVrI'kzꏜ.+=/'|_/nwW/r]ݮ=ksw/}ؽߣ[%TNڽ^r\'tEmr>R} ~\4Y'krMz]M٤혖Ve&[z,ʪ'߲ӒS?`Pk;4e_H@6srG`!`!>Z~r~'dMq|U@q| /)pk_}-KmFk榲|r[ҿyǡ㰕^!J* 7S;$HmG ԒOZhɁ%(+KiolIlY/jsqG.iU2_ 5(¨+f]M 5C6nT>gDVv#z]B-!BZ1T e{'rq˯ bc>(Tm[~hi?`PI)4n߲ P8"@AqommX  D"0y?QcX8 `.3TLH Dx*WGr18WC-GS D"@@Y({𯫫ˁID"@"ax5SK!D"@F^G~NFkr DŸʯٹlٲJN"@ D˫*CFgp7m x>RUV|p{}i7@_u@_{{{ OSynJ}R">@7(˸5?6nT>Mi먭n몭zYP9HkN]ueJ_mww fBHG _\m;HS6 #U B4My_zaZbH77lùFbczݯ6"v$>3f7)Qf7OoƍÁ:I|ɾ7vX:p2#GܴPv@1d%ȈoxM1 2~x7}"vPv,9Ҥ/p}DuYz,J*6zwCM+[?$ۄw>V2aP'Ia%aĥ^W #caA W>-o>HJaQq|hᛅw.SY㻝I& 7n ]J+}&p#>O1ZPq%@ 3Tks@LӴiӤD_h@ Y7ӧEל;D"@!xuTDFHeGN{\C@ G<#L᳄$[a|zIwxȿprxO^#|0hs~] # {a#O# Z9xVu+aA a=SuDYw#0ᇅ#y%°vN&x?*`+xWX XnU4hc۽+Aq¨# }K"D |ޯD`0m++a&1񻞲2"@ D5„ UdQꃗ.Cx  I8N<*<[0dgag :QX~3Kts' w ÆAFp tkW#6Wa) -3M:(3)tv%)~Nm~˜*Fv΁‡&|0 #;869U. &`Y¯H%a D>( "P SeP=X, D"` wtqG`XHx1Cӄۄr9@Ns4D{##X`ABa@;s[d6'vZ DD`CU'z7Y?jj\eHx,2MmrUCR">xl`Jy8g7T1Wlm/[+v5Hh%_@"B;9U#xzҾ9och>,r\w(| a?^xWa'~ $1|{p0h [d{ȃoVB`AO .aa0[t__){0pX(naОw c!|Kr'e$S //M"u\)Jcٓfʰ-\U$ï˚WIK␖Ve&[z,ʪ'߲ӒS?ھΖHq`M}=Q,ؕbr,]Y7z셆<&~b 1ȭ.@IDAT! 
٣v,>\p#>G}[O,9_ -' %qPrBqM⠫ru[|dx˖-nqqCgŹ9P3yf 3&|?,X2ԝ0 PGvphiq;K -|qJLis >?>YXralo#)# #xTaP0z[saV1c.;IY*mHrV\w1zr?BơIq-؏Xrb__ be%J 2+}#,jz~&*y(1)KóuԨQ޺UWZZZ5jnn,Ρ ի C#eJo'B#Wh 1Wo׵F2 > nkZr W)=rP=VP>B`[w cf}DX?܂~܁B 7 F cVfW #`saVܹ— #0wJy{e+]+<]O8:(e#^^" ¬8a;@#8P[A_0|/F@Y> ~+Y' c(m+! _~0ZX |Za_! BHᝅ |ah *ٗӄ?$HB?Q^_P?7 uRo%Q8(>Q<*Y+XhV5["@ DN+`=6o^#XUC ^$C Aß#ha _}0(OKes[ |0t ÅTCEc%㶜 ~ l/#[;Fp0lF  _,faFAD@?^Ӳ_4aH\(|n° A ' # BlڄID"@@}"Zn&OmVnG DT!c?R[N)"?)@$t,"J)}Wv=]s%#P%mkϱr|1+op<꒬<=;b}0qЭvɮ' oF%u"w8RFye_%DSLjєE!*KՅwJHD"@@#0iҤzb$ፔY9!f"zhXrC0hjI;R̤a1_uB"4@=ID@hdV(wo򙝢U(SW $8eՑ^ϕGWLI~ !?Ła ^jo|mh_=[#9?mSQP< Oa=O*ҡ}WcLo䪫T=$~J|O@8;ȉ&CPF.mBrQIc >qH'Дyd' >q|VUa(库?K\&P=Vz >q|}$~\ ,;-yj%>GlH._ܭ^͚5 ]$@VzTR!>P_ucx,]mݺ-X0Ok?AFC170z R~&/J`Zz*G6n`PLDj_K,?>L˓5n A5oiiħ0ħиIYOE,R*#D"@\T4"@( F*!Î@ك^H D"@ D"@@*e_wرL"D"@\"_[֐"@@c#0nܸA/76=ʾԩSwB % Q?QI=WXL@^J\7k{f%N6O8~-Z{D/sP\)n-[=(}5OY%KZZaLo+~~NKN@ 3Teg͕T^BXg~モ+謆RQS]h[*Z sh'iCJs @rt;d?v ŧy["` Mm5%~ls\,T#OajdׂhNKZiXr?Bơe8MV߲WUo,9l܈n|oU&6@}_4 P9#W`^=]- {[bK<,7^Z-Jϡr5r(Vz_j§XM-96n8@iиlI:1s]X{9wH%w\[*t0 DyٲeeJuDNX"ri{cg騻Ԉ#OZe&I߯!pID"@@!XL^X=f=$WOUi  DCZ[[|OP.Ư "3ua7|ץR4K:.#nqfsy'I$o ,=;D@hAh]jτP2MqF0WuI& A9lݺ">@7(˸:l܀)y|֯_ɟϯ}>y~si틇ɓ'|][ʫZ?~=%b^j㱛'+} Ѿ#]fֵg,"'#mځmY[P=i7mtˆ:;;մH*G!T.8Gkbc/1Ɓ4> %J,ΖU[J (+mK[U'W;vS*@AP9Բ+wɃ؁3ԩ7mu'cʡm< [:ܪ\K${r6o&N5Xrlp&L&O(Bھ#Mj%CsODT(GUqzxuX&Ӿ;n?31OT DmwN=GxJ"@ Dz%&gUWG֚ﴯ  뗌P94f"-p,ܠ_ntZի> T+[vZrTnLװc7T^ WʌX۽wʅGGvf PL>7PcݹGt?|ݿ/ Y^D"Py3gά {hZlBDٿmF D@௒$bh܎&? ";tmsUyc>"@@=!}^EY"0-֞[sJ=Vir! yp?H鰍-FyJգuHꩴ~#>#۷//-5ab}0+TZmV);E"-uj#$r AW?Δo鯸R0£z;Ri)w5/jvM-M^^\ݧ>Po*׺i, =WjiU-(O,F(8 0Wz{ %oj;tZVjj0 rGŏu_&M΅U_Wu J?ݟ_&?hݱDhX8XrŐ+.'ْ~͑`=ޟ^Ζ\5?ɑ'= jg+hܨ<-Skj}?-TbPĿ0ՄOq-/lێԒIi?jGKǒך~WnUn7M̛<Τ,MOퟖCeuKՁB̙ _(=%,>JvMǬ4>s!/Wa6BW[@|ھ--ї$8R 鿡~|7HُrCI]F~˞PyU؏Ki RMsTMoJjf7TN|aH ٳg]~-9#|kKw̍>_h ~q0D D'gVD"@u? ʈ@+/QUeD"@ D"@ D({𯫫ˁID"@@#k͛뿢! 
D"lB$1ʾի߻ (t, ^ϕgܐW-;-yC@ 7-Vk)֍:HvZI&< 2"p/֘jQq?i~e]VeiaR^*֒빬UV+4*Ö韅Aϕ'?W~1&=!-ʰMXrՕUϚ5k\gg8q"\-=\V-=&&>q$>/fSFme$N[E\V cW}ԏϷ]j\V 7b)ǟЗJm_k~j{#jq'۪?#aҷǖ=rz\7dz!@| P $HG NpvcCJʭ#oF|"jdIIYZ}˾/>J]^WRvħVô9TZtƠU,CW~GثkU>\p{lHj}Uq^~+}9µ^-vZB?W-'q*oD%sD|" BqC[7!҇ʫI1-JYH(틺ֲ~ Qh;Z顭T/4n/jӧOϿD"@@"ԩSz D"CޘDe;֍3N D5D"@@ !} @W=h1 D@ힶV:OkA D"@@}_LYVٺrI'kj{BV_6kF FÇ ->Vjka.ڶ㚏r¨'(\3~}sh?'\2+i}LϕiU-Gcb8XXSoՄaMz.َiiUm2ǒzB-;-9Kem_ gKN@+ZNZ[ɐ>./j+VpeI =P*,UUnݺ,{B媏oMvոB+B‡gGoN_uj_՛gVZlϟIc5sR>Tnϝ(\S,8pߒ$_$1GKʣ-ϭr?Bơ?&~\ +WN~0Obe% 2+}#,j]llv}GW먓U4yЗjFATj,Pf~#-|p7~UVl4|,?ߢ ],-9`|B}Pkkޔ%4!Б߲'Tn{.n.7jɒ#͘cܨiih\~/ *G迡vZi?ؾXmP>?,96n8WV.AkD- [J_h@)Y|G3"@ DS`-'w~![$K/IVPy_DD"@ "P?&O&MT]O D"д1z40[Iqݟ>}zI:F:<%K1޽݁2[r[rUK00 D΅ m%؅ J DCʢW$D"PWyD@FdL7 >olfD"@@dI4D"@ DˢJZJH&߸qc> fnݺu,M҇ʑcE޾^'% ZnjSbwwwf6m|ulѾF1yٲ'T8>5>Y:y/C|"7 J_c7T^I|6m5 q_J_Iz,bo-;-9GJUK š^6mVumװ~h)-{B?4T+u\Mӭ2l3_,O,F)բ_mLI%qHK2l-=\ueճN;!KrC[vZr|uꟖXJoOQ5\Mm>a&Cj^hȓițG~+=΁jUڭۨ6v}-,hJW)si8|r~y%]N2ǒ? )rV.=H|I^rv!>,OOTKy\z?Gblsٮ--~6y4<[GJ[U'--->aʯPa,M҇ʑ&Kb@82zCKN@|klQvXz,yKK3oKݕ@'?J.CG~˟C?C$-}p# ?Y~bɑQHXrlpQW!j !=!GhڵLڨ"@fpgDF D LvQ  D"@ D"@ %"P~aAS_U6 %UWz,{*冬ji+m?۟\'>of疜O]%׺UK Wڏk~kkkJ+O+#7iٵؾSius+Mʰ~Aϕ'?W~1&=!-ʰMXrՕUOu?Te%~ cC-ڮWdH}vfYmƤBRHsZկv6M8n/|!BZ%T!`~ S3k,_R;#O-Q)C>ɾ?NKBU{N#v'>'5~\+Ϟ=Z߲ӒʿXr >6IK#gR'WͭvAj8r_Ff:dɑ/?og<F~ [gs:ɍzȐe3o?-#گOX?*=rȵoTr!۷p{Ub#W">'B0V)4no=D"@j%MJ  D"@hx۸q۴iSK D +|a]6"@ D~~GGGV5#u@kex _> 'aV9>s)T;֭s۶ms?x0R:'۫ έܒJ [z{{A;7lLOg?us?im_n_c4<_`˚{H5zh*q<]nvK4C%[C:v9ڴ?+}#"@ qfΜ}oz*~y5@}WLuGb%|lPεpanZ6#Tw뭷6,Zӧ}W[Pt5kָx Qw&7msG4Mv>K1c +Q>pַ֗ͰhFSSfOWR=\olwgʂEJڏʥƸO< iVf?#{d/{3sȟES#ۥNv-=[C)SUipR PòJ3n86qY*JzBqHK6sKnaKBJ˟IژfK␖Ve&[z,ʪi^Go}D_~fo?y=oP˭Ʀ WdH}_]eD(P E! 
]w,Y2dU$?i m!9=z䶁;3<3 JB&?$60Jw{3OZ铐Z}6&Kz\N]ie?KKZ,MQLI)?rҫ>h>p[xʉ!>dV^BGqۡ2߁+zB)'\Dl\{ P~Pkz8&dH?U7yUW3gkkksj`n|"'@%d5Ba"D oXrKOoIy\ĄQw+}oSZ:[PQ?rK?CX?j4|⟤HAZzh  8X~sI,cɫI1-gPĹ>aP9ЬASCp(4n@WV|% W*f%D"@"}"tD"@@YH0/5,"@(,]'5"@ D"@۟ڂW*qك]]]L"D"@FCF\K D5x߲eK]ב#@كXwݺuD D"@ Dae8ȄL5QU||0>4ƍIWn޼AuVlHD @ww?IJܧ KBr!GUK+KVsD55-Ի[Z·ҏ:MJ`טP:NZDO+}#KmӦՀ:;;qZzh  8X~s2> #i9h1ǸSyoFk0BY&_kʡLJ [p=!G)Wƍ|Z۽A!L<1c:E ?~e_w_u="@ D"@ D>#"Pr<&ȵ-E?Fea&1c S׿ܟ'78C)zzz|Efa6Li{ҥK7`|J/(Y? Wnw-O>:[@Ւs=9w ʨ : 7wG7/=4A$2^E޼]zjy5}ٳG ͡j}{.'wUn,Z:%C[4Wǂl""@(< N|Zb!%W|@sm؜.e֯Z?dltfx=I͛^׺n۶m>{YASN9~CTww}B)to|/vwu|{܇?az@T+X@rݿk^#Yv\z_5ۊ+M7𳷾駟v!h> )^tJ_FE}+&ݛ/vm8{DweѕRf;ʏ(#?r矿Lƥ-IƟvwݒ%ϻQܵu۶O~c/'cDw≱yE ?Rߟ5ߎG5",S6qvK_" "gO>GT(H7ʥY%O D~,U *|0'/5].t W6)ba"/>Cx_R|Qp0" w0F . 'N}F[)T]bH?jf\e/Sr\td(!A 񝤣:x!@ ! y~3͜9s,>Ǭ;ubĉNW6[nW 4-qn„ >@;r~OY :$D201ci1ʆٳgя6y,Î6lp;8q~FA`@hO`0+aw}Ёv‹.C>$I\Vyp=uJO|Pӏ[>TZܬY*c3΃P߬ct\RO0L{oѫ_=Y0vz 3ov&i^Kw񣫫OҎ!H߷I@1Mkq(իqxK!f=6w?h-[d~^miqC9wfhsb}x{5C AE$?|HvtHVie蹤=4*ÖAϕ'V~1&=!-ʰMXrՕU[prQPx+wP9@E¯K}!`8a<`~Ի^8pwp}n͠1^G)\q]v/aT$a {\#Ga- ǫ/ @i1(G=r ipLx0S'Qwgw$A ]X) q& +}P9LAna[\uǪW9Bߪx(?id1u0$?} nK|`/~p ?YfT _olIN=| μsя~9k,+x[0 ` f!r׻]w+$H̵^>J7MC^n6y1ǸnMf!u]w>C^_w-ۗE/z=3Z x>]ve>6yKCl_W8v#bk{'x wV /;1q]U8#9T5WC6m{_4}A~]w5kkSێ'9ϒG?:wl1aڝr4? k`h>u׭ҳ>8};h!|fH>6}lf^x 1Eưyޖ~]~jw|O#gWɬ|n}Ǹ|f>UW뿖y;a(Gt;o|cwPiY~OY}esܲɇGG? i|e1/~c>e|za/ӗx!70Od/v~K@l E1S쳟uu6f}qf(Y~^-ȪyHdL5Tv6 znfLa!O+$;pG5\rg?@! an& V]xaD05?{5/X~h$'YVjPy%WmqVzQg|O1~\+rJ?`ֽ[]G]g\өMrɍ|(ޮɲE`ǃLrhһ̷cgqo#pFJa'to5G ˜w IVC(|A{d— -|0/d|\a+^"YXd {Ke[sޅГ)ٿ0wr?x 9T dz?Y1ꐔշUg!c (!σs((.CZ-\{e}邏N:+`yU\C~XhiדּΒ&f};̴ҀiR3 2g!}U\2CBq0+s"҃P6& 9|34uT 20k`!@u}@6^.`&/{?^WD6/'ly}8}m ¦ Jdjgү,9ĩ 'R#uC3/13m׮]+#;󼏠{h㫯vq<߀Y8ǒ#O&7CG? 
]$``+qz'I_i~A͖ev]ewryu){ {~@526nB~Yw}= l9>ywl9t!If}3g,S~v'Oqi7JPol+%p8[dşKqk_EK/]yt 3.p?}|T>/A'wsg榛ֻ|T?l?Ѹc<.<\ \7;|&yl>FC-zڷ )m2Joim5Bp{u}nc#lj.=_<[o% z@ߖI3q3e/ HHyݦD5<~3rAfN~`ij̼{#{ jvW~oG^R 9?=[jk}յ.cMf&o& C_-|,k4Ox?(>??%Na\õm¸8]]q<#xvrYJ~I[Q#g,E¿^(|0@>X8 D8w b𛄯X+& pra} :_/¨i`;0 nju*+RBXkA!Y4MV_t!{G2xA 3D8u ̊{g5« ~F,5v/A*L~KA-|8k5E|(AK]K.̺9̺Kzz5`,s9gВ\x"fk!W#1@H:\<~x9/炀-Hw^Qdkܸq[ lg?:+:-G8Tκ{)^5h F|w3^!/1[4D7`B}$J׾v%g>3{+Mf5dY{yWÌ+u6odFr{K'WY@w5WAx+׺sϝ᏿h}m[e`{v/yl·mZ> v9f?}tDG.%vB07a÷!3vldOɘwVŬ?Eů1Op¤h`m[beH[Mk瞝gP`.m&%qrA ¸NZ F@ ɷ_Kk-@,>aЧZxa-O{ѿof01p_bq>6(?B_-ӄг^# %&;.?sB0]f ?*`_ =Y蟒°S>bQ~H-H@kQ@-9, dk?O}dBrl9l=X? 89oYf|ȫљ:q[(A/׿ja!8G^aYHGu*_E31 ANiQG`8 .y؄ ѣ6O#fM/عsataa` = A# 1k3=plQm/T#M m:|3 F}"I$7@IDAT"O"3iD@q SÁ޽ȏ|}]$?{5a}헛6 9݇DX3;2pJ;M>0pI@Cp!o1?9e1gOwx3gxȿW_Y|78͙H1vl ~oY[ vuYE**! AԠ7sXv|#2Aq<*@qaȃGO-nGX 7 ? |bễ>N5kt 478Zca&s47) =ۣ55 #Uk|&T@z,b5q9gپ{+lmO{^ >:6 x^։r}/-:k4#¸V?*sa ѽr #P… kA9l;,"/"b7xEg)\ Rb_ ȏהuꈠ%΁p:B'XQek!hWACz A' ^Ŋ=Y lC, 3l5tz{ %KlBՀ)} ohy=69`A N OpXbS/~(d $1;(?̈Ffa63m~F'?%3>~~<Ce/{BD9[,ACj~SfnXqQc1ѣwQޢEKl7)L+m+ 7ɷ@;]q/o7o~Ο+F!pя?÷@0#u0dX$c&IHɫ_`5@a/Xh|yr] >)仰;¸y$~jY$@G4O~;$ϭV .}26ҊAˆO%/ۄA\ /LW DJs?' a%ă'0JAxe! nFpxa<s  Dxt_&qHLM /p𛄯UemF§&|kԃ88wlQyޒ;FP3˜ ~'oF /[v 5a/m-l <> >i/aCe_ XJg1!Us9%̀o65R+%@,"J0 7nixUXC_h 0@^-XEƃ@Wh&[Z7×JtY[uiM7~x D{5b& z^&[}# "Q,4L7zQB;Y=}b?>^?ɞ>ЄX]~4h?M1ʌ1ъ07 _pL\u>CpfxۧEJt!s}ƧE~|PqUTBf!@ 7DhA6rJpk+lf;O,L`#{D ^܀ CP3H˪}aoSrqY+ro1h_,^( BpM8_ua\q=ǣ T`F@akvq=0'D '|2aM§ ]¸^? /ƽ#6 }zآ o@A?O_-!a ]N a<q?s0RD\J.29IZ*e׭2lKRX*~]HWdvtSlu16.ƾC^җC( 0ͼrYv諳A)S OO# 330 1̌Ê@# `L}#A'иȋ,?0cם%;A%KfRm!Yr7IWѦI_be`N`N ,b6Ge;jOfw>\Kiyū%8%P^=ZZP|v f `is۴4Lb~h7[*"e 9s8W7lzrZ:K~}or(B [A@4IJ n@T-Dr|ᢁv޽=_ͮ/`jn\%)ۄ^Y.l5:97Ixpp#RXt2''ey|tBЕ6~S?Ho/|omOYxʡ훎j#@X~HI]uIL(93>>A>VEq%fQ,AZL5Ck? 
YpyC e d~Rq@5I{00Mܻ v ƽR(!~$v|i%)~}ַ|s5%S~/__Wx/I~1Xw2<|5鯸  R|5K{Vctg7+7{vm7\mZXyw$ie؊oKBT-?kzjz1V-vn@ zp@Bm0q7@6~ZC9enmB?:C_\7 Xj_L&a˒%Kgϟ<{b O>gsfX{!0V!'%l;u]N٩g ݻdOʒ>-#~aP91~e<;.9 иuY)GNlJh|Zb4(#^ _J0KD@"`֙U&D"@<'_1v#7Qb&z kw`2GF:,4b;U{֖<xD D"02J@ӄ++{%+[F D"@ DT<5C T[:e_ T`}-1&sI?AxRgy^VO[ R*9gKawLmY巷7nZ(a> w@RIߤ~|m~ꄐU m2\풦[eR*?vWMK蹤*6cUWV=djzyujPNj>a[!O\ʰ-UJՓVY8QϓW[z>4YRO>9 Aid1 '8$i~=;>(կsKX~h˚*܋>zfO\!CZ$(9T-^ćX>?ZyCm ,l%W!҇+~yC|I=!}_ڈVN'+)"i$H[1~ߒH[9y.W{E"P8iI?r< BP9W8(C1=1ZAIb\{ʁf͎ŧиOœuڵZ D"@ DzA˖-D`X2|ET_WW۶m[ZY"@ F`a ~"@ DQ-[7(%g-Gβ#%ݮY 9G>O6 _aUZ֭}nT";+UV eMtD"@Ȁ-2$uaكx9Nr"|뭷EEデ@%w2z%S%G?cƌ,):͚5kR\[Ub0.$쯴=p;g&C' H/ߵJ*>O,+[)N+}/CzNN 2!~K%i>TNQY( "@@'-%Cnj؛UV@i7Iƍ}Zô_Ձc &M$|E?[֭[efxJ*Jro| J_i˭?; IU_K>8pTOߒRJOyܻ߽p䪿VCWg,ׁZ''jR>Xr?3uA>RxVUj?7jܟؿi&te*M"> Z~H N/ F-cn&^0ѭov9tG_(o7TSMNf*[Ʒ+[ݘ1c\Ϭ׷*NX !A!L<ex;vW0WӅG+=ժ|0=ħz-\d7TeOV\U,٬i-;-97ھ\έ[''^= 2>TYO8%>'y*?Q^iܸq-{B啴_m>ԯ~0E>Hkiɩظ\g$_O,V1 6u sܴva0! BeOI74,@ow| EQ%W8\prd{~w饫݆ . D"@ D e 64Dڼy+fb!{6DYG o M6h1oy]qEC#Шu5%"@ ;::꿢!3 |2ŋUtR^{9h8wz.J@qXdΜ9d |+ivکS.nʔdP$5[lqfͪ1i. D"~̙3C2= #@+^ XmJLV4UWzI=ЏrѢEdwwq뮻-~Qђ#۷UKlG͓5Վqҥ{ղSswo>&LhsTk~9+Yɚ>\5>G5/C~bOӭ2le#<@r'yp䍤_1HK✖Ve&[z,ʪ'߲ӒW&|[-%dV~j/F?tmrꇾZ/ȟ%G]AYkve|\W#iIÝUgz,{?B'Do+kz gKNOl-?䚗8$>h=V;ZeJŁh`ɣ\adƏÖPy%WmqVzQ]_.q}>?%qہq-{m?We$ͯ3hyɓ<>(MOW@QPrUWcXIj_mPVjG_FMQ?k%\Z-{B_|OaBfXՆDjCK~J:{ %' иO>}9<"D"@*њY)SN͜  DEڭ-' @كcǎucƌiP8Ym"@ #@\ %,"@@u } E")2oDeN D>rd"@ D"@l#Ee_SrFPi]$kz͓5}=@c_7k4|m|u9뚞+&r?(W1A(UnQ{?Bơ:=W긔[eRmVmYj;ZH:$iiUm2ǒzB-;-yj\GMG5SB?(Oaɐ>Xuttv'}(BB嚷T=V!@| P+5kj}+ KNħ0';>MKoa[8[r-#ժUn˖-no-=\3'[C!`F|d[*ԯhb@?YqrJork~NKd\8OO];_Yz,櫘qeYɓ%Q,MW56|r%4CVc)aF([xl%o4gL䨪_$dEM첫TQPQ @|<\@DDCXAٷBI&}TTMwtϙoS=woUwU(dVl|>O}4oU0BT8~y*bb_K>zJwkWFOU}ͫs[ ;80-j琶sVq~em$+8c?>>U>yOϟo]}1XR4}F~?c0c&s]<}q0B&ƀ1` 9#/<Պɫؑf[%Y 8$[7c|kTŃ#F,YRC+ƀ1` ƀ1P-ܷŮ5>c`tkjkJF+\̄Ě ?dȐzS@ ݧKvWnBVg[xq7*4c0h`SS*sW:FvDDt;>=V՛Bx[!?+uko ƀ1P+ 31 lߞcMGWXŃΌƀ1` @2 hU"y`؊mɎYYVo_m0c0aU|0/SK~޼y]El8… {P>c>+"$gyyw~3)ޞy>هgޮo0\;tРm˯6{,bᅰ`9E]|z'v?+tƿp^=s-h ϤXھVXKK]?h`Xǰ`̙.Rf>]+_|~? 
yc_[꺑~q(kT_l`7 nlvIڪ%̫_նZK_m~> C y;|z_>=u?>;>}[[oǛώO_m> h5ơUj@; j]5xOW;ϟj? ԟI30`1ZTJHڧlߩSLlͨK|RgJ1cpHg5'd׏>=n:y/uH9 \T;v1` ƀ1` ƀ1` ƀ1` /[v2Oc e ƀ1` ƀ1` ƀ1` ƀ1` @< Tk3j7>Wijb p^w,i1yk?quWnv:Z߼y)4칎ݳIrZs.'UW]$yMZB\7--a[aO>;>ه}/e+V!MMO*=d6&uys<_v?n$Υ;*ky>/0`O7ZέJo Zei41(sqY>D}R>>Y%B.9PFÊ=%?^lUD:$yǹ|Iە/>;>};c'MԟgYl LxHnӆ.n'izVu”_g.9&μv|m$g>هe_-Owgo濍z?j/Vޫ/e=?󎓎^Vf@+o_S~Eʫխw_{ c{Y0)߫ ¢aJ(t/P2=.Vef> &W8T+ׇޜ?4!!DKb'qZ9{ӮJ_꺑~ޟ9sf0k,|01c0j2Z5mc0fM.N dv<"_"PgMnRaf6K PpL=nPcc L# VUkWi SJɱ'o G  8p!XDuUnОӄE p -8Gw p@} [6a~([A*o|b0ze`r8^~yƝg6B%Y˻c5zgoO]у5ؗ_^nq',!p$ Wb[ HaXtLӣ4-a+Pa"8xnFWIprQߩCB< 7okDpzRm,|W2MKpoSka]) '>'-0s&)ԋ_~M07'w 'C8'|@p},#~ydˏZZ,[uJtJ UrwZș듴>p4NGZr?":kMBBp8ksʏ~ \ sMẗR\/P_uBR,(=Tzu>aua0EˋڸY&\x3J'+=Hh$G1 $9(^+wΥղͽ> KY/Ho>}zǵmW>٪}Gť#M|W7n\x1H6н\#\;>~4׬KՅ=w6Hک~|~ry0&}<i*Oj#qM'U N|RX/R\(n.7(Ɨ6 (n7c·n?,)<*$YE?N> 7Cܰ;>}Vo*L}|;ݿΏ»c##%dۓۘMj~J *TmiD57aZǝY[٧7w| +q/ZwqMጵ%l,Z$ ܐ@OƖ@C} [wCwd0S,p@(Y븑~RZFE#8/ EJ ](pK2KN\V|C8 qYS>. §p$[Gn0GE*p;I'.})UEpoqwx{8Rw5(@ # ]M.}y`}Vryaݶ>U뛯ޠEc:-8KO86,a8?^Np~,k(wr N. rJ! s]@X(!L~&P/Ww}HpQ‡˅3_ _*8~Rs =IpжalHhDžRǏ~ڇ^7ZI~xUOViώO1O]^?WD~NKUzcsXj ƀ1` >pݸ~Zu'Ⱥ@8KpIHpTt?U~k {;!V7 Pƅ ,u;>7&5My¹r~! gܣ|{Xᆕ6R7܀򍉛f| q S6e9)9F ; 7 ~'<( 3}+NaC!\Bo>P?|_"&endlo/:=wtx^ϫǪg엝]/,F1m`;J$? a"Epjp{ \"'2¡J,sX'*u˄7s[*|G?'WÇڡ$ \p-,ת^"p#);P>m~kk?X@30rWvA3d ƀ1` +f`䊋.A S`4)6ͨfu=/ >&$p ^7(_¾Gv t U; s5SMM)򢀯?fCr/m>'#N2P$ ; W ]]zF/Ǭnzy?ngw (sBilHn>(p,mYE%mH'g! u&h9>'!+L8lxdp h\Т13Ьm" ұl:n 2\CD57|-%N(Ϛ5zec`%cchQWe%^6ZV()W=lkO?_Wk>bWn_v} UqwUi-mun,o/h6ʯ'\M#Bn>!lqNd$ ?n: <tC-˅o_LُW ȃWz u$ ݣi7!@GlEZ+'J'Qp%#}h{Hv&EF_? 3ؐPA%P@>"/(|\@:J~G@uaS^ǯ >cHjGkRW}oew Aպճ}\X-kGv|t]&u@r<ЀZ>1W_B;[78W>//]K_cyteKqi%Ᵹr1< ?_/V[/ӧ-0ӹ[s7lߢ~ x%vs*3WQNK)OPiJWR$j\#Ÿ/o #\ .pB k6e \[cԳ; J ;>}oc+{z{cc;[|!ev4s!ͤ.~W?~|0p;]f f,ԢT7:Z>;v};쫇gj8E瓵D+%6K\y+eqq *B@{E:ivz瘅 ֭}~%1~\ioo|Ԛy? 
e׷+8tРMjb #s)"î:+Ji^zѢEE K^}ۂvoV6-ZJI=2}ic7cG>}^\G'σ u " ەgǧZx0a'}npLm7ns2bĈ <8p6B@@0C[%[~[iQzo4u5?vvun׷7&W61i>1VÇw!C2砯~//~o2uƸkF\zs>҆Z_4 v7Rh1cƄ$z*]Kӧ?U ~J]7د vc0c0c0jG;)՛Qr&P%0WU=-1?AkjpյK@1P_Q`ƀ1` u1wW~rlY{, c0c0ch]lY{Tm;NGc̯:q+GZ*׎kCNSoggg<@><՝7> Tk3j72[`2!D]zU<.|s)\A6B 6{W"9Wץ[kK߹]gб/; !/Xc_g'Mtiu}ۼ\34ʵ'w@IDAT#-^g\;?L?8c^^~ӧO+ O_p^R.s_st>t>A;WoCC%y~\5.wL\ߚ1:)~뭷Yf6fg}HN>,tSLJWC{i[6?y)0[~aLܥn|3;3ۻ>WRVs/v7x#`6̩q;}zϫk鱦⠗޼}v|~Yu?ħ/nW>O#Or˃ m'yֿ|?OoK+nniӧw(eeÖs&], >0hz)Xy%kgj9f@7+:-hEH:uaӑI6%uivZ*4w0Y 3 t죢}9&Ygo}|7~7}wg=u7~`4?W6zlGdF}_;zL^#yyjR}ճ%|˽&?Fه½oL ?;WT|L{uuh&w pTiOƭuVq!4O%fSzkEG.^G@(@ӓR~,^aƀ1`  k[ƀ1` @0𦴉i> ^]2W+_QN+O<}v|هg՝uokj_O;wthEeU'~KYWm4~Zn_\ʑfjn_V>ه7>&ܾ.lXԶ>YWzA˚߳JL,jIFq#;Xgǧ X j5· aZ|I}p5.TA~.{p̔01c00й~=]r_IO>PIf0c0J0p1%H] Ejhǵ{AeV1` ƀ1` .Z>Nc0cPy:5>bHW>N̿܊1` ƀ1` ֶ7g ƀ1` M[[n 9!cXU{k扝~;jԨ穓JUL\YҬcMxcW~276,a\*x{4K_ͳ=ϗz_[7֒{~l=gݹ}[va~wm+wm$6}I:]-M$|] Dސ?zu~L}v|zwLhl$|U,m$vLqHppBzg\;>~Ǎ[&|!ޱj/=W2~ ƕS_j7}z&?Ϭm_7vH}v~b0Jl1P (SE6>= IjRqF([uŧwpЖҕ=x5E(x5}u}ռ> f6}[guاǖO?*ʫ~E! Ҫ1Yֱ,jm)~Mz^=F̾fx8?kӛ>eLc ƀb{`G_,u+#G!3g f͚7 +v1` ƀ1` |/^8X]0 <#W˦&vuo o18x$?wstwuec0"?ʠ%Ka5c0za ԋ1`  (*f_tD%zlƀ11P^z)"|ѡҬz3gN0|]׎<iw gH ]Wj_im&Mc0c0c0c0@Ńxf:)3f{Lss`9nn>8*V_}(ƀ1` \[[+uiƀ1` ƀ1` @C]~޼y]%l&`ĈE: . Ǝs1lfSo|>Zwu'fv^|B6{FO[ g;<ɏ6YǛ_zᇗ)9~Z>]44`ӛ^?-hѢs޼mAKGKmkWϤXھ43}NKoGмAQz]e⅕=).V0aטjfVۺ[&o/a?$^l.|A@xB"TI<1}F|˾,R׍<[i`(MOp /:th#&ƀcq¾6)Wh9xhxwmmy3Pe.b&ŽqyAs0 َgFMU]+WW}6ų2+ŏώOvVn|xN>C2dH5:<6_ww20`1ZR+ןޜh_.#Gmpp0Yh.VvG r6~##C YMyI!\-, 8BXʎ!±B;(}J[_Xapm<[S.}D =<NbZ<\žlSR׍<r1` ƀ1` .r~9rub[Ϟ5=o{jGc+NAc0`Y$C0mS h0L$E@^k>cIyM ^X4+vVCJ;>gS N*C ei?{6i<$'&Z&8as¦NWz@;k<0@;hmcTֈc  k { NҎwX >#|$9M@ ߴ*Ѷ"9J[_^@m0&[ڂq3lDl5Y!n`<8+g ƀ1` +fIxef qg.x|,r+*絿;˃++ƀ1P |\>}+ # ~4ʓ\ !% }O"s@yK 7AI DJUjBbI@@_dCr@t.a@`;MʌnomA&8I S| W { Yd v8Ym#  'n>-I%g '\ʆ6#glb\-#ȆB^ nw(0ZXO!GqGPa_ޯ}إ= "ԋLS[9**(f0c(_Qܟ9;c0cwkZ\` Ĥ? 
zDl Z~ҢVD\H;eǏaHG9t&@qr:XSw$Ԛ}8N^>?}zvo>|zRq\::tp.̫:gW>쇳4,:ućK qh4qmMc/9ꛗk>[v|t֋Lҕ]XiF&`l%WǾvۗ3q޵$_3ft1J / 6!Cf|1w P;Th6(OnUs~!0ap\/pj0Eh 4Sch, ]!+̱pNG gR RRrOVܩ2NT R= E෢Jv]2 [8 l (|z#Vy3p%\uv@a?Ys5/4Y3vтO^}IKӛ0~}.g^}x/xj_jKk2JQi%|%0X c^[CU?9|,O",\i%p:,JASc5G- \.@ N>."l;^JxT8QAR@6($ኴ*Mk [|EzApv۷ע o ? .$%0 w 696_vMdaE}7vDm_ E]p9R Oko++)8w͖1` ƀ1 47̯0OcfaamaW:' BH2)~%|Oppq psA)k iT{Sdܴ N6/#"_ ;ȕw W .Xt5f)'g)C'!L@΍Ad"t(@4ghqu O m& /!HOx;N teCҾºr@{"!Hү \|TxBBi\Ч  A\%p#:+)nmz@ @="|AL8UhA7.R<#/|^H+o{pӮ}akVgƀ1` @EPQkf0cZ .× g .p, l%S"#HA@7qAܸON#HpB7¨HһJ =WW vz9Q. .Ϗ+'_ NNQ~mf ɨ`]yu2w AƷk*]@dke:FJ +Sx|"0!6dOh.M!3Xe%ƹnRL(J?NycQK2\.-oG/$]9m'$O=)R}˵]ߍ'U-͕6x`ɒ%6kc0c iʾ`Mc021]8C` AW C¥iBˍ6g+`O&!pp`IF<tmJ˕Ύ$~ d`y=^ha^iKc_|c-DH$cL+(G:Oi3I㟏}+:WFRF_Q2P|኎MƏs;̙3^Ui/%,*` c"* $d]ƕ%Mq+$W>qϾv՚>?}z ir}I󌷼WqH mWJڟ1cF6gǧH%7}[0_z?Mimp=lx?%GsYG™?s>*Q@va7ff}ZA G xfV-ĸZ@`$ q<׺Q @x1 A"$3n0֓J<]iǸ}I?:3 Ϥ z@>o4:ԫKۀ l4WrԱ"]f(Drt쏗~c  4Ilջʵfmo٧7 ?yw~:?kgӼwǃO_r-e}fPf쳖7 fsdV~ҷϟz\6+-jӿ*d-ӄyiÍ; ;¬:C|u)$. ;!aS\ fR5L>?Vh;9N٤7O[ʾw_w湍VV }O1H:I$$٩j+q¾P6mRz5}w'GoƏyǹv^̫F:LXtN/H댯]}ך/=^{?(Oc3ʽOV%̊e+_3㯑YO Ew)p0I8Jp6!xʼ$Yy".3<"# LS@8nuP+ ¥BIrRmUO9KS0b $>ҧ#su3Y0cƌ ::.␉1` ƀ1` | jQ0c$F{ 'xH1M+}C8]*N8K(qϾv՚>?}z ir}I󌷼ه|=!|VzrS~t[X(Wj]~PM~\oUqzӯ'30֓J<]iǸ}I?:/zc(0?zu~LV}v|zwL9vRدjoV0k֬`ܸqfb d4IMezWWv|Ԛ}׬5}<4Oe>}z``6s`xYW>;>}CguZXsBk|{6-k-/GnCR>;gy. 
gyWH^GI-ϤތU\bBKc d@E\0 1қc(>;>= IjV "`@ֲ7-  sLv¾%y}~ƿOg짝׾;?+ z;6jӧ[erEwԼ)stJ3>\)V1rz޿j4Ĥ $!7cIYSV41!-5狺O~a /<8Rꪫ}K,I_ ?\tEԩSr=Tt^xa_);3gSYj ƀ1` 5~%R~`b ƀ11йf]5d u/YY)xoĈpJ1r`UW}?OO>-c;c0j1jI0pr.\ X^31Vn }ryY& T;M ^yoeGp~0,ȏ|#vmF+-"w}yO?kS8g {nj>=W_ ?$3kO2%8C[?ς[/Tm7o^0hР@ƀ1` N=j ar* Ub|zJTD$q+Wh N?OK3W蟋hFN@2s>;oԏ?%>Li%eް"ϫQ/|?aW?dp Z@1*}[>8yҧ 9at .LչJ05w.ߦ巿mhl/n;wl6 |}cA^bF #<~0v[Ї1)^>?$kyׇµ}z{a[{?釺{* IS2W!ΌwLO:/N[q~ҽ.f_5;X8ɴdѢEEͫo[t_ت ϤXھ43Ы9~`-ޜyyd Z|Ǽz0gf-߿n]Oq(kU5fNP9|pl)m[n9 FЍ2dȐo[$s`o|aYu~8SE?`5/Í?| }k_ ^_=fmc{›g>0aBpdžY&l#|*+ vz*, 3{8\t>q%yꩧsޥ?0hk ZZՏ:Hje__X-Ec9cLR5wq:|{^}|jGu P4Ɲ_|j7#mjߜY @<]_%}~<fp Dz'?X?)-k:ѣ&JlLJVe#6yIU^]c%J{iU< z}H8!q@[{Ci+t->+`3r9MW_=TFxw}wl1x/KEu]}EWwb1e' B,g ߇v-5cGú_W]a}V1H l)񸑚om5aD<5j}O镜\H;e;l5* u5B Vo ZP&C`{,|'3oȑ=ȂΜ93\YVB~ǃ뮻. D ſJ c"9NSNG,bV|ۇ4}j4~|< t7<3|Cj|M+z/Ilf<|@`BV:i}z㿛crIi]Ws^+ǽSDztiJmJ>Y&ͱi:g/RCon;WEx޴iӂg}6\2K@,^g?\q?#*<&MVN@w):f;6x׃n)8Nd{!۬,? ZkpP8_}4iR?8IoSk_ӛ<;?n&4y ⫝cycqN=O'tC^=?$nv|居k<1~a$.]3|~6> ?yh$~:w׺_@|?,&ti|GY}`Hiǔ_gНeœ:W+SNJ'GU)/~17o^.jgϞ8N?"f̘:=fs9] J. cP(sG&Uvܿw1Iҗ{]+qc@yd[o:w uYb&rO"xGY]aHHSi㐲yfߝwI}5?>|zny3p,tKܮ*?Ğ@HyiK}oO+Os>;zz}S%fS~zs>0PK]8ƿ8@|ħ;)NF}2l|YO=uwb\q^Uwf=rggyK\s5]4[j!Sƀ1` ƀ1` ƀ1` 4[!k4@.Wŋ%KT3n ƀ1` 4$oPu/Ԑ}zWwvwU8?i ~@͝L>^׹rY/l?ݾ|[o|L}q]ذmI|^}/NG{}Au> /g#osw~yab 8I?Sk_ӛ<?b^v-RאV ˻5a:GUPAJ'F.0vX~^civZyWo77g_z'9?=^j<zd _{~oO}itoKiOki4;uo3L Gz#]ڪ7ͫFN$>>s+WOݍt?YVMDpf,a"!\& a7]UxL(SBe>AX*\$";Fx]8ZP-c0\c*E:8HG+'NLO^zN*%7Ѝ1޵31M) C/uCU؁_y[vJ}e0Fzs>r?w9Ə~gw놞ҌKZ?nplmvgϞ'? &N]k6x#8"tM{w;S#>oQY6.#z뭃C9$ۂ<>3>8(?y{sN?c0cOhVmO nA$<, F$( b#&˔MX('pl\k@Ѫ6 ŤP_qŬ[;ZyNF ex  /L%B+$ n(3^x]Qpf_n?L aJB= Oi R#pyeJk+O;0Bm`糖2P@-$a_[m!0Xp'^"V%_ 62?'l7N aS0V` =-PG[ƀ1` ƀ1ЃjUT̟͛7" sav 2eJ{A(N~oO+l0ck> UzR6GF*h'@XX[Qem(!p r@06:!AFR;)& .$~P"ETpr2&  ]$l&/+|F@7!& Т ; |mpmKyOAt׆II+8ܧš hehFh7o N?aN^K\#40W-(?$}6>'P?,)į=@_1.7ya+0g͚ƚa 4#N;-n; 70+ڹ ߎ;[.\%\|s=ƀ1` @_1pdTKq70G u@ oY ^NМTbiA%f.LBL-U u@`зl 9㑍k}zW]d^w#ypp$^8-Q 2V8XM!p+tSB 3v>ۄ*O"pO qYmT.*;HV .u4l"o8V\Ώ6Q Fw*T] B{юRϋ8d\aq 5v 6)zn9 #ap@ ,$aug $$? 
xN%l$ W z!w=̣weq|/sHm_>}Y/f׷p⺰aQےz_4_4Y_Ux>w.yN?MGq P'ķ]ޥN=%cΜ9Lg&6mڴq]Rr_}/~1IЇ9?/ 3 /^0+^x06w7bqz}FW=ztjBmmIRg|B_yFF7N|zS\NG])qA'eB I+Iw` }\)M((d2HAʉs | _%"< 8!GG+OY\(Bp'3;g-.8PyBx\,<(u`Oi*$ap`|c7CűO2!m_…­ۑ(Q>`=@f>BҵwyT=J[-/^@IDATɸlȗyΤ6i8=;zfp'y0~ I]|w5}wzҤڛW_n}?ꙟ乓ܦI]Z{+チXU_=w\:.hwdWGi7:V`b-aC3 Hcrfm.sA$3 g]iҳ.ɳFǃOoJ;}|Sw#5Ǐ)3'TQcG1Ca)'Uk zC!_.a),;E).\*/`g ^v^&|L(0Ta; =j= /lۇ / j ># *\ 19ھU)A|@K T Dvzp>$<(PxZ@xua0Gr#F[' h'e qƒ|FQJ}}t@& G I²H7M)v>.|JJ]@X(!L}O>V!Y?\ܓ"!:?"Iɫ I-:[\^=}?o~UO9k|Yk@C}}Pje yZR׍<2rtcq0U2\]cE=xqƅ78T? R_kkzzc {w 41cXh(? ڄ|/{Q}\]J+P9^Z@ ?b#iJ.6IpDa/Bp!')Rlϴ;u#:LxRxF@H p4Y@~#"\POmu}^DmcVNіHqq%?- b+Cy3B`]G3NT9%a(%&>tɮ=/0?~ 4!`ՙر}ntΆq9E'i]P6>\N+sp𶰆?z}v$M?""`ܚBgqYxO3WRIOR?>NJ}w#ơJYZ $7oME'C1skcXH{xݒ%K*b$N󱭭-^}wǠ[o{1_EK&lήU~yϠ1` @2𯕸@R\mprGbᘄmm$ҵu褢ۅ {$i7y}CrmL;)?ׅ,>iTYҦAoCP0e}&%Țm b1.S~}K˾kY(хy;x_),»'Lxpxf Ǐyt0cA`Ƒ1P `eη#|W *kb ƀ1P pB&ƀ1 ~cC BBi>R|p14}|a "^◀… IjZ&q>M(fqP}3-ut}.uum~?|0|`Bsek=eu8gJR6)Wh2goƏy`뿏ϼz^`?BGx~Ϟ>;-h vƟs2@>`F(9u2f/Z;VZyΫRd7v|b׷縩ѤXھ43}N׫9~,ޜySwysof}9(uHwv)'izQd =RR9StA&M]?| }"z(t']eU|9d/ڏ#p"eU8qN>mR6sޥ?0?:Jv;. yJ׷/&Kߕ]wHg cW?;2dHQ=zs}RKǚgLʵz'L&+IaIfع EP$䪨Wp"U"rAD,"@I d3GqvWqqpŇr:7g;j\WV⧦0xd\T > = IY&F篽ea_^ȼbUwFCGi6V86F6v]8NTY4\己y/ ?uq?7i<+yQ8!͞vs:2ŋ+VچT}'X(O?U^[q/>>)̡|B?eW}3A=?h ME@$Ў0vHIӨkBp97I'\42e j5MYkhh06}\}۝s^&$O)@ܦ.|>x_|H}>܊:]+}kڠlBuBC Ӡs Yb~e? 
|.œ+vBҾdsmqedbon$S?s@ydO6 Mޣmo_)ϱ)%q~EişN(oB@ lnsA/C[i۠&LieAS!y Eگ1} t D 1uB i?a2Pݸ|j&l.fmy1hoǃKd" " " C@qm1gACCA2dV/C@m$ݮ8!N;3>"t&t eه΅x]jh睄v> ]] oqgz G"$.:^3zZ.zwʤChiGvGǫgf\?6a7y4Xŧ03fc|t]Ǣî+vV_m#:D FL,;zb0-'fL3ki|ƊƎƮcLõx?\sl i-gIRy'ΉOY^q= h8u^쵡P8ן P&p\Q֧8T[|ևu{Vg_ OaS]͆Z!fWl0ϤVA7A@Ĺ*lO/~DO206cm [A [3\ಭO0Ѹo6{e*~aS|à,c`Ã.n {U>6}QqJ>E̲e̔)SB]` E tٓ>YM˯Yݨ3_   6worY$*hz,_ :ޤ~1&~H_zoWfk>Ne| (Dp=biw 򎻯B?΁o>Dopdb Do :}ӠNu=?*;/!փ Yr|l<7p2o/B4[s$@R&]W;+S躑$~Ki݆ }ACu" "P$1sAĹh@CL} DcBj7QO?== PLi'C@·= Mw`iw x\%%_S?cK%Sy(?܍ ף˿RɿRWPD@D@D@D@D@D`H &s#|,h ğ؎}3.|P]@bhL:Z&[ &Y~;i;@@j BpĬ,/yOTZD`l #I1#F0̓D_$D@D@H 5&ƤEO^[ŒmV0ܢ7]|k1>yLDz 삪zWf%wܸq(BҺ >'7I7>޾[C^oվ=M|r,t%xΔ=a_!z0?sF]@AUn(Ʒ\TR=ȪvTk;|vJǽ>زv}p]!_8F%Y0QeaXWq\x..YiZC1Wu|wqp^O\q\~/oG|luElp9|^炱Uox)[菣v;Cl~#]-?Ipv6" K x Qe#싊S~Lι^&$>QͣqU7u~iϻ0gWys8.Ⓚ_q]-gݣkWHu>j6Üih³G`T ;#>>F *=$k`hŸ{Yjy,t>JE@'θ7OFG1J,_ܬXL<9Fie|0ӧOPE@D@D@D@D|G~Q 糃/y򯽽P>b," " "K޹}Æ ٷv*" " " " 5Az!eAW+һ~ҥ|mz7̠UVӠ!׎D@D G`MnVs" " " " " M l(c2w]ԪH_wwinn fheaN/ 0̇vZ3o޼! Z2}0̿([}XD@D@D@D@@;f;|p8qg1㗀1ce]<ʼnQqF3rH!*ԯ>PR lSzo_/ytvv9]ꍏ/>Yß~r~8?FYyyU~Wc_WtW|GOaCϺuMU&nh5ݍyӤq\7$dO`Kmu)g/t>4Ҵy%܉AA7fz_'}XoЁ lD֘t4

([)txؠ1\}z7|]!&>廼W&\bDXh/h_h{LƤDcơ~ NO!?bƁdBhFV?ڽv\:0Iv7Dc wzIObzdh:(lb| ^eC\ohWAJⱟuB4&NX)Ї &h<a.hd0'^ܦB;2o0}rlݡmc=@4ĻCo ^h߁L6! d험a2eoIJf[ q/C4xvxLdG3un c^v.hĶxbς~ flKi}(CK" " "PzO8aV!( Q/; #31d{L: z/t9ƻCwB@L c7-˛BL}-[IG &hY2CLt 1iB7C,y- 3;D{ :b⊉$؀y?}L~] = 1E$ ClF{?1_CzͶeQKwuuyf,hFe8܆ 68-ocC8Ⓚ߮ess1h>8OCb'o~[vꟸ. [H$`x;':Vh_W}U^N>a{hK?i:^'[cH6Lh{{!4&> b"o7!c## K(/QLm'a噠{k.x?e|}:̡[!ڭ61aHcx4thwbRfQy t5Ĥ_@B^ĔL NȖ] } : eم10 .n? E٣pR!ѫwD[OLqxRv2vPvN 9pznԲ*nOُ7 @ hȓޜ317b6 2gL> ~ۖgB?'|xw?ݍ]r3qn0Z8iJ޷^2e;op]TB~OK||~Uo|xd|7>~O@| sG><יz#>yS,ן0ಋgR\06k4şGX}f\33Yc } \k߰nYm˄5싊%իѶ.ڿ!ovp^U,TW|C{=y:.>;sq+/Wϔ)S8%?|-oM T~kf9}#)=ׄ9/!Ni_Cfu ~/ɯ &yh)d0srdP$VzdWt4tt<!֑b4ğͅ1q 1e]Z ́hwAL>} mQLY\zzbҍ'rbnw;4h߇~q'!Lg@7@w?ɗ4bA`:Y0 0c7'-(qѸ &^hwB+4BǥF~2 ch@lCng'1xdi*De/Xߵd3?D!b ϗ/xs$#vл!)ִ߯阏LTpq5V,XFW4j"jj ED@D`Gv>t%t&4uA?N_cz9f&&>C֘H9:0 o@3˜\11}h4b5ɕ_g)G3!&hC3Bt. 1g({z6߃ n~>0eC)80ĄC1>R?y4BA+ڻ{s~G?wCg}c tŻXOCu?:j΅ӡ]!ơ1.&h4D4ȃ _VL6! ۖI ,j1w,tW2f >XoQ'A'Bh!&ǁ ۅ1'm6d ͕ۜe,̅la&vp-=D@D@D@D@D@D@D@[PA7uA8,Z m5A֘  6CS1IĄJh䌵阹ZmZ &Ą]4<;IULom̸$LX{>7g^ueaXV(h܆I;~"`V,SQ'B{ b2n fϻ rXvI];5˳!GXs PnX>ςn ~h뾏*h4D)^f!jnYCloo7% " " " M|XqQ&?v8bl.jĠ#4mh.Z?ml|E㝓QcbB?q2y2fV;>׶qL@xg] 8ub1ah`1 mǤbUj}G]tY|n;3;BIL]WlܪI=_~l$Վ.ڷl2a3jT'bW;I_'i;ʫ}kE1~NphnM]8-v6VqTw(G2(D ֋l!ޝhY&#z=|u Ŋ_F7!h),kT[^}.nSZ>Fq9#>>$7Ž.[/+O`( xϺI4iWyϡ?mrZ'|eL4/ KbV" ]oowGۆ/*NG僿F}ry`qG3aYW_I͍Ŀvؓ>%i_u籕el]_^g*?Yr'OvHZOWyT)986 B<]q\~ƪ~GD@@ϽlןB׍$K̿'hUBD@D@D&L`Ǝ-a1季@=(Io޼yy W\Ct:Ionr4<#*~ JYzwZ.ge>ovtվ]SFs]R5k8T\\sK W?q|rXLd" G`m1#G4e"P^nzvUyw" " " " " "  =o=@G#;mŒ5گOōk쭏bXQI27"o\v8-m4/D69{/t>hӧOǒ7V bĈyR.0~QK/LR|jC+5HԾQTr>ɱ-_>ttH1[TFn-gxᅢ}>뷹9WΩ} )LZZpD@;0X2I O?bѵ_'P >6 .nik*"Px=x}8e2dLoU@Gu&\&¾8#0z(gt e˖z})ɓmho4Xŧ0Z3goN;?kUO{r%桇V}lO'J/wk.M@Pe?a{*v9M2];ː8>-~^%2ⓂOQ; uЫ@xc2x|J=7IX깔IRrB ƌ2'?F/" " E 4:n ;Qӿ!L*W1c𳯎[" " " $,(c=$?.˺Mv"9nT=PYt_m2 /K]+&ԞD@D@8jO&" " "Pi񋪤@8|c24O>׫\BMw|xOhZݱf ]("g =  mj|85L IqF4<)q?>+G@a>pWbc'3f3VZ%>W#>|q_'Ӻ4v7>Su0iӦs&_ X^yǁDӰ,xjG\[﹮~[U|:sg{ 3[_ !*NWڎ)ѢEiX^KU5{.ρoRÆ 
=:O0ښ1+˯j͎_8ts/\ҰnsTMoCO$WHg̲~~]-;{e_U|6aIڗɵҒu^|Y߄,X`34\+w09m%Fgү&-_o :@tG-y&࿪Ykk>qzn0HC˻>(@ y!CvZ]FB^>C@1̖69w&B`fe*R_eC{:#dߝcrjQD@D@D@D`4܈3. 㷘M4~t_ J1binn'yD@D@D@D@D@D@)# _G ͇7&1DHVW2c2C~m4`Al?n8?qUF9s|LP~"> %o..?kuq+˯gxБb^I?MU^6|wb?>ۆQD.|E>N]q\~+nڊoz?zlӰciS<S&:e-.P|wk0lmz {9Qh}EJ! ^vqW}S3?㞏v]]~/oG|luv[џg'S u?@f>S|e48ŵbx}/5O0sm4w~ůχ /?i;HԨ" mxeZ8nCS%➏N?Ͼ$q+˯鷌 >q3|WSgWc#O}Bϭ,sO/*>7F7458ST|n5qquTaDq(tVq]|rr%Ǵ u+V0oP?L}"; n(' 6 M6y "P;jU[J>/QOqFaY;rIvĒsvq?ztjÇRq۫.Oݮ\ BqXp=]'H-S|'{e(SL*:Smw'_o~+>CFuP?qe˵v[uV盛˟mށ7*?tqvo7ƴϵ23:^lRX:j4B~S|77!'}o\Vo|r~Aԇ Na 4ɾspqc,?yj_|NG|G ک IMhE.egF>%; 9ǥBD@D@D@D@Df |kךu,U\D@D@j@WX1;D@D@D@*B+WȾSE-?i_<[#Prg()J$O˗8LD@D@D: YF4U}hcfrԝPr$>l08͎PR*6>O_ھi`[uO;..^):}I`pslr__$>'kOc>N.X);ۗ}Z6\O)-NeYQDK>//4;b(oU Oa‡G=_[+몧˯ )>clqw!*>sBHҏ^..C+Ob|4گ=t~<aY<ߏ\'[pYW?Im| 9 飅$ѫ4F-ݫ}m0u[&P(;sU>>w E)h;찃6lXѣh>vU}..?|ȠT|IwU'w}rg7$2|EO2/d^>K-_㷣9To@Fޥ@?PYhR1&Ivm'盛_5o Uݾ\֜E[Ÿ\khjHtn0jcł?T-@ӋM ~gaiz߯S+M`l*P_E{&Z䩾)E@D@D@D@D [: ZRMwwqmq\iP|hp{|\x&>}`]{v͆\ϝDnڴ)uƵ_W|W~'^ר]ů\nh5ݍ?oWP |֭[^N[ g哾sz>ژH5mk![W9V?kV2mDճuqoz^=\$3ڸ qg<^$6!ȑ#ѷ/uqǻ8Gjm`N] D]3%T^gt~>G||P5;ٚ|GijΊMD->&Wu||}V5Y͚pqpy?/ƣCQq ]74t'N(]DEV&L0cVpjz$]]]CtH"P3lʰ!K1bd" " " C~E@D@D% 0ɿLOclU"%O *" " " " " " "PSͭ3@IDATÍKu|Gcq~n]rK!poUןZIi%-vVf ~(E!>)H=?˸<ş캸<ۤ3'ir_"Vm|X$oY=_T{z{*{d\J|'PCOC ORvc~Tuͨi+ocōr7<`~Htl`~ƨG88럭>'v[;2|e:M9uFX>Oĥvsh"b [޴Tmb骏ħ0Zã`]/Q{`]Ǜo)'A||~%>gɒ%޳Lvm7DՎ.8|^'+OM|N]g⏦W~sZ=1kŋg̘7\5&_ g۔jIVυ8_oJ'jH\U>⇏=c8hQ0HRcc6.cВqg$ؓ>Ro|\_|H}~>8ܷonҊmwPV_Ǥ~/ *:Wd?uԾl-ܧk};uL/WO#́Nqg|/7[Yn_/](vP?Xۥ~->n5c`ْ6rv@ -nyy%*GZb]/j_@.whof\G=xλCR?]4wKO\'0dh_lfO`]90⇏=c8|Ee˖)SX4+ V|I_p4.h/w"$q!u}lk:'z4%w{pBqFnv9WU>_qq ?䃵QOW~\;ħШv. ,?c .Z^5o/x~g3sig`y;_|_^K̿nCD@D@D@*@`?'`wڛ``j||1!R%OU@_xH@zv>ɌY߅:/@?CVUv%Oqf5((;eC}_&" " "Po-4#frG/vh{h D{ oΘ0=}'-X\g=B{@77>i?ϳ /zG7n"XUlW}e׉mRqKK>7$P!o6a85OcQ8-3~x*[|W{%4ⓀaׅgR@km?k[Tuv*k}˻6V8[m?lPfYУz#!& kbF[Gt|C>_$[q|&6gYۀy? zIwxc^?2hh!tDd~̳.]w'?%*>)A|OtÚ+?j]\'P |HpOҮzb 5&h ៘#r? 
e90&] 1E3J 4k{c6w}tt-cI!&CB@Acw)ލc &5]p־d)tA 8Ƹˮ]]ЅAĺ5Al& nlo|ZkL^WGM=ݞyKTnc|_8h#,kwtH'77a?ٓ^m&LERcMo]wyg_}S)؀8M;Q}4kR?7j^l+>Yy pf>_hZWnwθ w/bB(vk5^{ ::bw1)u8} :zbTsЋЙI eLZ1e\vM}pwTa *H;Qq,3>7sV'>ȑ窅ۍ1~7WSWfj?g?pQwL1hL1&X1;厇TWkc^15W@cg[Y. &6CQP6-mK;1I<ɴ@ g}PXcBOeWτ"I;()_ -Z!˶4QoOy1Ǐ;A-kٝJj톒 ~0S:߁Ohׇ9H݈T ҶdPtn1T~nnn;W&>^sܱywnB򵹯!?T _`K_K\DZ=9t:CpZV7`Dd ζCLY3' 1!EƏExs hsgcmн!kL8XǛ< ؠ3;?S{lBK0hL27W`~nϺ3y0.m?~*$Y=[x2)G۟x?q &athWgn>Y[=E=D݈4]c`vXnt t-1w ^~ =޳ۓ/ytCL XI=yu8`A M9}❉h@'Ad5C5{n'HE@D@D~ Ϥ &1 JTcbwgLn}zw]eg9?eo u 1v5ğ$D忰 XL1!HcR .4.<h6ĺ~5Bw@Lb8}1GzbvXf- Db.>1c ;t qĚԒ%Kx ;|p8qX7n4#Gl%*πn/駟6K.5G}ȾO~">s5͓O[n8 뿮s˯G||30nܪ^z!s~C1G}hxJ[f休&u͔&mh!Ą_GG8ba&ۻlV-cEDk; Ow?@AC[CQ۲,c1noHjB<LD lVlۨQg4:(>WxzSNaig5#%s>~$*$na?1 ?lذ#QQa?.kkwuo8.?n!)9 8JwJ|' F&" " "0 }w'PnǶ\T '֬Y%U$kyǝNW66lŇ2 IQMWZe6mdΝ˪X盛_gjKo#gbuww{q,zu7nm0IU_}ì\m6_j>%fܵt7!ߴO _Ir ו1(j}/j-K}]>&1::,g@6]Dg*R( &uO)z$m;M?b~]F PvyM^7qd~y[=HgF=?gJ7oƟ5k~*%*["Ww㹙)v˨s?afe5xDe7Bg>IC/)>+ba>xPxzG<sڑ?llN+?]KR﷤O0wsV6UcCOL,LOtwu{+6vs#nyysֽP]Iĥd]tq\v >>&{N;4 7`/[V*Gm4uT3sؕx… *\nu_ xw:gzs衯`4}-#69\|q'BK㏯Q%W,>uo`kc:eM3^Rä́..]KT?=-b㸮ħ|xp\^T"H{}&ȸHf aM9~neCu馦&xFኤ] k`6A|<,zHuq4c/;. OADWKTQF~ЪA%|p穫|_W}U4pq ?"&V϶n)?-5c M KD>Wotd-U{#퇥pg+psHJ`>6 x4h,ՀLLD@D@D.𻾾ES 'T?%1ɧqF&" " " " "PJ)@<~`p }78^D@D@D@sPc1HWvo^(58ckl'W8K>o[[Y|nΎPgn*oGqqpk {pzY}}"\uMOm2NoEmOW$#qqF3,=W6p*wT\+_ސv??}S.|*k}˻6V8K.56m2_0,Q#1!svV;kl~|h6Cr#ϗ`E8uFX>hю &h/>㪏,CS b;K15 W? fL^Jq鎿 ͓a"aFkQ`!Ꟗ?Oapl+*?Gt_m(k9hĉvћm}ʤ~n}?YU/޶m=]~>nyWlsy>o\cr/*NG?xDB5.?ͤP'7Z^E @%F؋k޴T#IAGT%*XQ_&y8|oIﷀO0 & ۗZ.?7V'+> yr]M+k3krII#SgM0{@4'" " "0̿A%/S:8d %OK#$>dx,ݔ`ZJwJ_fV%OUTCї|pHw3:7.t\܌kfz]h8J~ Xb X֬@(hvvb$h9!+r]Zu>pr^ٌoG[-#G 4\TBIb~WC/O ; ǠR[:G-?/5dv9?S?s$`iѺ2fko%.L7#aްV`]nH}˃}>㶯-iOKӾqS..QepyWƊx?T]q\wqJ jV`}_T(%wѢEfŊf?q؋/AKǵ_ >=5(a)X"$'az7~qm\a3637h%ڏvZ߅C+K/bL/g%̈́nNj4fLc^Iݝ2=g7|O'{No qԮexqg n0U9V=Gp9\ρ68'?OK?ܦRg܆ ]/^l6mdf̘b*o č+O@| sl>rY1?mx9n|r/GKa$d+~ndwu: P$?-/p#oIRcVZeƏ%qwտ~[#ys|޵/XͲn㟂b1Oqʏe{wCG!llj;d}Dc[[vm? _\~2?Ysj_hZ?YZSFvtɳZ'⪧˯@@/3:sgur!r+@miL;;0?m =_ k[Z"? by!? 
vc(['0ˇBL},qπh@WrƟ# qxW x xs3al8drĬ LA7Ғ/]fEE`~8V2*yofԨQeBT ÇA%Mp]1iw$ecA+ j~ m]1aF cmXeLܝ B߀AGA¶-k!lW@# ~Lz'D[ =BL} 1A];1I6B n4,`~?5]g!>OQVfET}?EݑjbX*/"0$ 3lTe" Nbxy.g\Kkcǎ5 h" " " UI`x?1 m,t:m; :b&hoyG1Uv>8ab|wW!LLsHmðqL>ڄ&=eLq} jbBp&t.D7oc|WQǧ K@< q6 QmOF]X1'_Z̤O" uM?@?f֯k[]W?s%o@bczo+g.`1dC1xvrWhW&;/ƻS ﺣCQƟ~O2rԄ_q?L01?{c6{KBˋBBwE䝵dL3]JuD mmm%sۍ7JU^}SLbKu}{}=I?rHwmSܰ;f@ˡcC-hmo \y&;lm9<}X>bb-| ݇'A7AC>ckޒLX, -2;|T o`_ݶ{˳U,aG3|ݍ̦̘MkagZ3fL7lؼ;֬Yc::Lv'L}ͦu Lm0߶ٴnuٲ]H p\IOǻ:M^]f}liheؓLoZLznڤ_K gBhqkF8d3&>@|' ő9 L;hٲ=2Iit;SǏ2VsxF3{|=7ۚ{Y4hXlsڽ+EN6a/ 3M6ɌʘݦNu oX`%{ɁN~p3FH[k:O-e)\ʯ[<㈹:EegyW(y~n^qɓU+97+N;yqK8Nj- 1fȑf_<,Z԰aVj/G$+:F7lU5k1uys>`Z | 7?J_c3c5F; Gc?jmN͐Mٮ MgՁuc~%:Wfam4P>1`υ1l| w w@@"g'=O]剼 Xo?ڌh9>O>}ɜ1[O09cYtCָѴ,@7`š׿n { iJ-fcM8 8x5=!v=zx GPG|ߘ1vsWX-ڟk*߆wD~$۵ˤ/KcX_FǷҒY$YO̘T]q\~ .uqܸN˧u# ~ܺ2OEӦ"P:sn&o]9s/+̟?lذ߃>ط2%|4?Ox~ C*< ={>nOG _x[q#Ǟxb1x#Es lɘZwO87Њә1hp0!0] }1zl 1]& OЏ+z.{/V0st a\-8<ƏBˠס{ =hcn:QL(h`+]}bp$m?xPś0]9 :ΧG/E1gr߅yD/P*7.(Oɿ*Q9H;lU'bo(*p eOB?xdLllw݉A`AC av?vZ}FZ~ok{RZbNclRT+ؾ0ӧL\ef|Oʲ1~CCiw &sG^cǤEзBqv/r) N]:0.?I+v_vBcSB_Nc}w ^cR}힕Y, ]ƔndC@G".w8[d́͘1?ۄ=LxB-pN_"5/%w/z%|$n9zT6KK3`ϙCHok fD\4V|V/޺Tlw Vzܬq,Eȝx3<ͼzv&l[i&?}6scFv&ڵ4m*P?ϊ3axâgcK/#01s}QoN0;$vqQGȂǼvZ{챇9{˗j&|3.?u8o t޵S=GOʼ=#;k_00|3ПF`Ѣ-S晓O o?r_6wݵ97G~y K.bz)@M\rx?<|4< w2T}{psI;1Ce9ǯņ?qefqMYESp?aԨoI?_JKȞM^ƍ c#Ge|)ʌ7rk]+~ltYgӡU*>gt9cpG׈_g]4}R|CfsrdcppB_-= ={=fX0ÄԟSI=2ץG,|Ew# 78Ql_ v(8m&3Q6ԋ-l1=>wptc `g\ݡЍ7JG2;ǐW`0uanIU6v6v]QepyWƊJ(n]tjkecE{Q>|]튍㪏ħ0RL}>a8]~{`'|q^+WD"f7xo/&;0sA]v1L&22&x o_oۑy}a3 l„ ^/nZ[[fG?L/c^nF/q^{y ~ aj*/)}s|@ LܝsB$mǘo|c[/xQoɜx`QMHv{?\qEk>|0wʕG62&ymm.4g1ۯ}"c}o?s1mog0u"]p]$׫b#&\c/s/ܥ}TsL|K%/wbki߁S@ 2ofӾ iPk]vWS򓌩6?N|Dә_Utd@gqsۦ~؟\y6#雼L|9ΰnMl*ov;N߻ ܶw-ǂ#5$'!mօ-sۥ@o!FQ$/֮;7`քE-SUDή C ΂_n)\I_>'O/=n~⧱_ ZqV#B8{~c7'eN9*WG5C$Ѹok*WN4:#1׭jdt+gmUwEّs@k_%m?\ĩ$T@S pgV/|{uiّzl2MO8:/ev$XE?Kt:.sgno={$^|%OWx r4 ,QLn2 1ZogIEtЊSU]=69aƌQ4?Wk"RztttiEyz>H5㨻|>o6mM=hte^t?sH/iC"2;׿ɼѐxeY GgH͛ %3tB9s=ck]}J;{ 7j%v'V;59Wŏ>9%#$ )MvH9q>+*@#>u 5_,3ju.>9twFRr/x{=FD%̋'u+G@Ɂi?@8̿!57[Ѱ:7E, GAmq"Cգ31:&dVm+ 
tJ+)"ЂtGtȈ1[>Q/ǁ<ŧzK4r$~1-Yxg{Aȋ#:n68Kn0\#rrߘ1c=Ȁˑ"wM7ц(k6XOS'xBm|WldJdT_ #1!Cz/i%w՛<Ԃq8<s9e˺Hr;6j/38]> t_jix"BN#et\t;:ä4thoz(s ~`Ev!#w^z!=1p@~vcF=E6/4&Fq'm; [oDT{60jH>-?d8y)h7X1MaM%oing0$ϲT ΙD~v9v*PdCWkLכ0RC8GND g˕?|J['a@W'_~ag_t F]VN+m96yc P|\γݕʻ=#zDF _ynysZy`#侂:^\UE,8t88Tڞ+L<ĽW0!q z+1. [_>']I}6;<'&< 38&))t }-EY -S'=G&2#^zR(H\?j'>_pg?u6Zo'2lw>(a"GZE["z@i;UgᐂirA_Q_;zD<sqr[sRsq9/lsfu"/r%kFt 18OQ#A]8nnA4K_8R0}8k#!8*V)YqϬ,G3cݦ&X ;Xt6rp`.Fm2C *5u>r,YB|9T>'O߷=n~⧱|'QCzB|MJjOHOiWOB8TH)oi~^ {fh_W)i E Gqk>(+DTU|cÔ5v.GUVYT{c=:Ö/_nva?$OG20Tี K;wϲǮ}?"V81}sFkŽ}ӌiSvߕsM6 (K>đqJ0K4j+SE_> /UֳpΈUM3LB? {bE] ;CDKۿmVi/~q9I.襎Byɓ͟+FnA[ ǭMX*sESᰛ+{۸n`,ڙ+x0zrԧr?Nu; ξ1:u:j}?J'ټ S} ڲو-ۄި7[Z>o?ȆzBgELi`:?}sZ>m9|=ZQXen>4}pmᤞrDb[; !׻%>|$G:]PEA1oMs“Ct f0 M&{#ly8Dr]$˪iLuKיᓡ!üp7~urT 4PĢs ˘E,Lȼ+3=^hv,l%}Kx߉!ؗDVoS#Ӗ?D3-@^Z=!yOx\/7菞$(@i'!r\T]wݵA&qtsLf>F䥓wNvSN q5Olڶ\n!xDWF郬,NS:fd:hD"_O7.g";vlrynyi^C_8Ď>2 tvOcq wGBDp> H#DR-!x"Eb9'#]F$:2pyXH" s .@ĤH "PF{ F*F @E;y?c KB=297kIc4}ёU{o (- `Q 쾌 (ET[=-@("Ǩf;IIPE@PsP<z,Hңx@:Aތsw#Z N@G|G"kEāCoZg!љq, i. Ht*~$NY~ $:9H!I`3FRRBNG~l\=^0KsfG* /bʽ1x7yH.=>U"QV=YrhnY*'IBWaIrL o+W6%j"h{x7 7͡l-v#$+)}9#Ϳֺhboofhah؛GŌq-oHH"ѹGzD3vvC/#rGZDcHt@?o"#NE"G|#9XN?3tLD@@_$:v&ׯrd>,s{p<ƑۥݥqA&HGb]+ǝ)BXPbEa;a_tʾryPU,fGvfUS2 H$\lЍ*$BI|U=K5;O N&L/e-vwzZ2ģqzθzSڳ֚!nDaVOH y7I0\ȝt2G{#*soxt#{'{?&׷"MC?!q,l4S\ |;Fшb¿*oh,6eD'>h6z·mmUoEǞ͈Įr2m"8/ײ|tSϑ ?F"1] IJyP$w/#XO#B~/_Ǣ2rYUEx<.2 uJg ǽ@!/鈪(BWŇܪ*fI]I{ VHOW|jП*WXv+NjI7?ߦK@j>6uaD%m'ҧ`g9V~jČQndt6p(>P:Mv(<$#b8[no'E? )+/*[9's Gƹ{g&^prڹCj./ה?NS>۴TltQk>GgJ|g f*e?5m;j# S ŋv,{iwe,2Gm^2Ox]XŢꇉ"{?e מ߁qXPh?Xɗ.'"mm&q|[X<@{w^0D=}VOH^Iߗqi.|n}>y$'-?ҮC)>ǼBw @ $ E X! 8u#[lW4z%^S\εYm&FKOy?PsQSwHSi7 ,f^3H[w,29q;0م`[iiӐri /̪teFܷl moG+ԒFJJov(G ,2/ 1?Jd=$mp8?i'p 53gWqgl"Fv#[WRι8FH HCz3ɕ8_#v$+N㫱ҞH$qּ DҥH#}iGH+m)px^tRS=r߬2v5 L Ҿ8E{ qz GA4wt)Ω&쇝COlHz+5kFJe.T:E b%E@P^#WҦM "-!|[\B\ۢW idž+җqGc Hԍk|sG.LB) Hi{0!_3)A"o$%E !k sWj7o<6wHhkqn:0}j}̐C"GptiGtPp8n2+bz}I$:p_wL09o]B;׮F-¶ L/CacX&]Fb+ń )T0XPĢ{$rt7 & /Y|v>0`Q~*W0=s(U2 =o<p" zt|rb 3cƌtUZha8wmo{yGsGͰav]yWӭ!Cz0Gu;̨Q̇>! 
/Xuѣm)wy'> ;U_t~#b_ٰa?lxoI׾ 7oz꩘{}g6nh:C PN9esϡ擟deg7ahk͡4k̤I=?::gd-4fv_6߿ٱ3Yl.`^7_K_ZbPsۛ,׺Wͷ<&$o6@RR!б *+y+'"{4( :)8lp,F^ھkU\r-\`Ϧ®B^HڥkͰp%ޣsL%g[ϏԴؑzoZ,|5-b[: a'Rۻ*HzS7~tttȪBEo-]#ჹXKit2+ S?zxD-m?i2߈/~g.fʔ)p`6+|NU_b|Κhn׮͛cymv.Cu{O1u3[fV̛^ogƆÏC]waOsϝ%,5xsp*r3d<⋷77߼g/y[Wf-wi?Y@;m_]j[cO{ ⃨u6=_񉺕!~3_k׮.@sZ~OsZ!>YgHv3OVFH2'-G^W|3Fq@AdR^([8Nn9HҕW^i{v:QHSqѢE68oĈ׽unG3zVBgq0aA!G!~򓟴}V-z w _;2R2gchWK1vo~h+0s7͟،fx`u R]~286GޙgN2v4ݗts)„ SadKpXwW6;Zp޼Q榛v? .m~>gCkm#?MCcZ@ݿi ɫ-A18B)zvZ:2Hm@;:I95p:^3Ϙ_|:9UN;͎hիWtq4!G#D P8W`c:wW8;~Ǖ[o]kMQ/=dT l.N0SpG).q>ѣ4`h>|jkD? \y̛7knu5]E+v:[֪\{y}vE@PE`GQAοn5Sh)nG{v43ȑ-9%NN-8rH\s@8=H>uΛ7cZD!#?D9՘APgfР+w'/ڍ{Zގ;Fn't=N=^fΌFPnXC+}rZ.s9cS*"("(@",P(bko=CmSVߝJ[H:G(<:>_1hQ~a;ו}k^cGQٳYfGHt hS888Bp̘16z1c=B%~?[[{렭-c:} q9l3Θh>{gNes]dr-; 2o}hCg~H_6\_Fw)~q/%8D%ӎFB7":⟒" 廪Ͼn_j=(>eV'YXCzB|ѕT3?'OkHO޿ an{5GyKW-| 2꧷zBzT!BV_喓<]wol]s@F}'~3,|ꩧH>;,FF!tK|Ȼ{lD`:`& ;N ?Xa;n ^&^0\wL 䓭O-?_4aN`q5~| Br]7\A[nSSڶguT|UAP~m?D $Ǘq/A 0RbeopP!B|ſY,R(Pk=W_96KH{ +!$O}Y|p_GZ]mϟ_*6L.1BDug>ٵ>:8"oҤI6POqv.Ksڑy t2~(mix͚5v*1>?CH@g?[xٲeV? HoжK:׾~zRVNer_0̙ ;`:\H."N?2lTv8s/8]q<ʻGe$cXiW!<GЭ"@Qt8&2>_z6Zohn㉞5ǝ$lۋk۩ŭlw<\Yz}>N'9&EZt-Hg"G:S~o,nŖB:ė^>ۧ䣭y}%GCBAPJ~|k٩ ⧩__ /!C|+gu5.c?&>/z헡s7Q{BzP' aZ"P@ҨdiۿtѶۄވGb$*ҧG rC9-pGs?CVi|jEl]pٸq5kV|o3u/Z͌}o?׈}+P:E@PE -s 37޷rT-¹:vE: $:^ʵt=>=c<ӂDD8$:@"1G HqBZDG"?bsCzf"))"(ő2@&h@k sMD w5ѵ)"(\\2LAmu{0.uENŖH%#EF'ttʑ蠻 HoA" v/r .D8:tҏσ"퇐!HO9Э"("(" ; bE@PE`G ëy6=,#8mw8W 8NcқwH#v7|g8w$ ;ҳHˑ8oW$qD!=4i7)`+)"0,A\HkZaHX4 :a|*lqr ;n8Qč*%EWzB~"/9 ^ڋ?<P7*&j?QZ@: -BD*ѩ"c.Dt1.C4#*lc!y!|O%BwQ/_. 
?uZ=Lm$늓|HO/?~<$.7!'ֻ_g~8SW$(qs,SeHW/q˂zQ[E{ Vp/U|)^=!{B[&گ\n>ʡTJ߿=-k?sWƴ䢜Q}v nqrոϔ=KΤRnRf-!;C|?B@񩍃'(lx_\o4ED5W'ᨊXXjHB*e$t׿IZ.Iu&FLO||q;%`)iH5M|ݶp2%5j-[4W]X¬\ UPE@PZMS1E@PE>x1USRVB4:::ƍS)"(@s"`amr_s"V)"(Ehȿֺ|VPE@PE@PE@PE@zh?[aPLPE@vd"(͈Q8~;inFoRzFxzMֈ@nҥ$|KrfF hioÆ I~}D %E@z%ꊻC3)JٲeKU{#,q'K(óõk@եV}BjTwB`6k=Ѷ `=3e-f~[L~f[6 9qή̚j h u|ȍC̐_166̚o1i {,g=ߒݻL^f˞[L{;Lft3]mږD~G+FT7}޸t=ڇ:ϙG`qIfM^]f͖7l1Ţp qiBPGˆe2ܬUR7Ƶ|OQrW-_}5'ħ8hu~"|ki[Ea߲tl/G0[:e:U8<9{8҉Hҟ1c>_8XNɜazP3\GWS辻/G#Vvn 0~.v\<[\nȚwi#xѱ8/bMyKgug~[4#йӴ]fį5ꛈt55FlFaMVOu9ykԨraÆU+(>Q3׶~x!B|Ňqfo?4Ԡ("(#0(em͋RE88Cm6Ugn$'Y> 8y8mQ= n|F=8b~{=ѷ H1XPE\WOiH R"(@_"krI\n]ů.C)"(@22aLMPjXrj(m9A" zEsTriމQl1X8vT1V,)/-9:r,9ˊ8|Tr*̊$8񣥣4 iF4U$rC`aoޣ#=tKM}E{2s1R|_,+wb|s S69,n6 ^6@G;%(7ɦοիW5k4:?|b-I\ƍf[o]:{,I>MUaZe{nizZ08bm_ƞg>VdI}*(cA/Q:t_.7fO*_ 8>m?ǐ_A@ ttli͘]5S9OlHc\pqɏ=HAl ifc9-ggvB`^6(ćmUyBԤfڐ.bn%ݏZ?ϯk ]x+R#ʥ8=_}_}dg|+kcXi!n/7RڏQvgP&_~&ϙĘI72AP(m C[E3bRq=~1;rC^tg0 +`m +|ynD}6YYevQ55o V?0UDX/< f_$NwLD៫ۼƘU?\e&E.9$xYa%k^Q/G^\E('^S%U|Ef/g7NVx!=!J˔T>!'ѯ9Cx~"oߣH˻1)'NW.?GH c_˶zB>}d8(>|av$p+',#×|CW@#v-RIϗ?a}\R> $>IF_/8˗|t2Qfчٳg;p? 
vuq#N-O!,/.^WO/To_*;zf Ϸҁpbz8G>[>ޠ?cSNGoyP7*\V0s.Oۭ+9C)yvݹ]4`x‰!TO='!3eрn 4{*?"3fpJ٣Nw>ъxəuӥ߁1xqȳl)>y+)* ɧ7#տu5eԩf_'}[ks@^Iϻ87(TuOuP.AHJ"("dlU|/(ԭEо=+zKg*T=qNCCpI\z=HBlۻMa IhOȹ̢>HO>p707S0ROG&F╨@jv553Š #Ю"("("u"{\+: fϒ(bW娊/U4DݛM׍N;::o6Z"; !vOD([Ň.|APRa|t}bsb<_@ N4eUe#n"Z1#{8gN17~1&9_WCzB|ѕTϲeLGo'|3WD ɹ9WDσ+!^] ܯt%ѯ~ѬK~+O˗2TRDڕH6NB|w!|!­?)- G+I~xͥr!O|,nCzq#85>e[xX9lHced/37/1_Ǒke9rKEY%Nfv*Y3Ry/W\~fy2` 6i+z\9_$}I hgO_k q+د+~ƭ[[/}~=n'Iz<w H$,_;='-4 eU?QHC7Rx*>D@PWP 'ʨd1(CD \ovŪLӖ!O¸\_a4㡋ŧVaB~|X33aW>ӨE@PEYs̘1jڥ(}SP^t, Q8V3sbxρ7;F9;Nzk`ވӕGWyhv/vy6""d(ɹ]sv/+Qyο1ʓ;-, {8z<foQCozS^>W6kIDATw.9iRQ^ɬVsE@PE@PE``mUmG9T8qJ9TxҗxQlϻ /xj*"މScD.a-|nf#FNy׀CS"(@ãJ$Fx$I*Ɣ_OG_O=\dXk 3= O@] >K,1ׯ7gW.3(C>cH^Wf~g63"|EE(qs :cPUwJ?}Ϗ{T&ua޴}H[h3o"Z^HàSV:mwX0 ?j ov]pMΚ”("q/*D^p2Cʮ߱=[ojp~>dlv8n,v9+Șwq͟k3o.m&3y+^!_3^rT58DwCiRF3mpCLӖ_~tttYfiv* "ݺ"/$#B)>վ-#FE,B}}MFG!g._mQ G/i#܆CZ>um$BֈOz"'B!C]1_#/?گؓ+c&5A[LU%Q&0oqIyQl~=6{ h\全4F{k.CxY{m)yv$*/O6+z# KU&a}?N땏R|6r}iQ9C*/s♒7U+jY/i%|JV_G5ơoi=<9""("("([qTkHjbڼ^(3FSjOƏ|fKtmWbovÉG s1瀗y#DZƈU⎵3"(@S!pߨQ4OSU("|? 
P͊"м|7Rc_ !Qw = <ӊP1g O`-4L푈5=4Hq&dFHm(AAC^Cxu/Ixml9pKX?{TXTϴ>o:+6H6;6cƌ16mJCEE@PE,ƅ*~m%XGJT" ^Hœj̚טы!H6۵3as }LC!UiYSN}S+i0<:=G'S} X>^w1 LԱC$nAGn4Cwj8x¢Y8bF"^ČՉ) 7?"(-@QBsOwÆ fp/“TvjWD`gWG_WkDKTG!>bIk 5iI&{L7Z7뮻̵^k3g9`-k +>D iqcU?v_$f~3ڵkN;$4T?-]ԈRHȞ|!MQ|"jG񩧝hiP)zT}饗Kٳ#ËCUT1!'B~?+8i1P\joiB\sZm VM/~ڲpBqF3kU juQkX m qPsDZKH~l>E0t("(}_H4 >兪U"("4oP[gA\ò]36("(@ˈ"("("z47|ߐ!CI*"(@WA| Xh>zRE@P&BLE ~z-3a$Y@S%OZq uԫ'Tتŕ}jDm?ÍoLgʘ6^wv뢒GOCH^U|jL}}J PZ~c})\^dQ֗ EWR=&L`3WD@0}~r|Pw|~>[4˭!~_,0dOZ!MQ| pj'a]ahcj 6կ޴fj?~vG5!omKkrs언vhpm8=ys---ǖ >eYSY<ף@xNW|H(gm?a"D(qh59oCv̤C۬g+MBrQ.Aq1mm='GnC!-{1~ols5VXaV\MBPE@PiPgE*xz("(['_xyqzU@#%(/lܸ~EAPE@Pz#O^9m|_vjNE@PE@hgRRBοֺ|VPE@J`] weE@PE@PE@e KùHah#%I%ORyOMt)mYIۃI*j!'])zJ[6i{<<\??$ aBJB8mGx\9|HO/IO?dg@&o_U]s(wGl)tazkaDãN2r⢜/G8 ɧn#mP; %޿Oۏ f~{{1ׂHz =iO@7G6mW}?1Ui-^ 7"Z~CvZeA8>9'SY/ߕw?hhǶFB5(3<Ṛ'C0zEpKpR|j+G?O=O`?'dg<M!OB< KzbnE@PE@PE@PE@PE`hxkךu֙ɓ'Ei"("@jW|J ~Lib =ΙSͅpW^ ;_H`'34; e?r)ܻӘ?Rik4[R8j0Q_{~$Œ1VKoT} /:f1^ϔ+c2ڿIcȄwQ_zNlU_U`ŲAUz,:jHHXjkgw6?sv٘mkȵv_W*gjXߨJUk43Žo7'^M_BDvųMw$.jEuUODUz~(lmã2  Lm/KK?jeiq+T|8Nm?m61O gϳ*NP론OcDVKOc|W2? g%!`b<ӟ֞՟h]=<wmI tQ_bOcuzxLIK/nܶ+Mft:.SSzN8ilu,>՟?dZzx^_g#ML]iK"XV' +~q+qg|\8]~ãlEṴ|ѧV'B ԮGvK^ ɧnOcqyCǂqv?d˧m!=!?^sHr.SyC ߷'G/A,ٶ->۲n|P}zߵ5d-h;Ym-;d+b!=zš !<%qmoڕ?dOV~waӎL.Sgh&UD!=!ܨK'B!_B!B|'ܮ H.U_qO2ŘZP&WOiч2Uak׊.G@eacVʌ7+LQ(jUmbkP}Q_=W~8l-igEsG=/ޛa̖![❅W['닻[ !K[}jjNpj%1e^Ot.M *tvE33g WHOT木Q_%=>N򩐞,Z{o_l[%bEX?_62 傦:-/sgC/ޛu&t! 
ŵϒ1YUmc25W)aUj[z c,b+/ը4JJ+;,2{}iKRCE@PE@PE@PE@PE4i&ä("([?|oذaPBE@PE@׆ w-_ܬXPE@PE 5y饗R "("z{_W+8%KE$|0#t)-ʕfft.RzB_^V|Z` (>qNV~7m.r(T!>=>DKOm'Bw8hn̕u]wk4gs 'Dolb~iCv[ OtD sw뮻 ~㏏qQE+-ƍ SҥKU?$z=(Q|)>/~&!a[çþvR|8lkOzݮ3jԨH66n :HIf_'ơ/O~v%o$ #v„ &,"("?f ù))"(֎~o5׷"p:t֊^"("83CwE@P~+)@!P_ٯ+"("("("(@\WW=% ,ʱOC|=&;Uyͤx*D Kgv!Ԯ;mB'jYiq, &D CzB|ſwLW8ڕڸ)>O@-Mw ⋮z~"YnIWpjiSȉD@d~z3{l1nCvٯ|Z(> _/m?r7G=/5kVb* "ݺ"/$#B)>.8p8^9Nfo+Fl6[6$ (>qNVO=|GS|Cm'Bw8hn̕oe!?O/'}S޿VBכ@OS[ZF5"("("("(""hF27n KRE@P>XPE@Pz@`…% Y+D_rټysI;!|JݺulZ~ſwG[ݳԧό3lڴN-}i_Q\O8(>܆CZzB?B@񩍃H_m=N7*Ma{k^#Yo78Umŧ66*➃ ˖-3SN@ mԞyJYz"p2fL9ͮO?$?5iƎ\72V93-:EG=^F_T~)ۗG֎f*BiLqKfÆ flQ!>s!{C)>O@m(>Vh?o{̼yey^t_f^uhU_2hυ,Kx`^/ *&-?oK8`"(u 羼PաF*"(@ $5QAο2Xx耚ZsFgΪ[nٵ0kRk/x_5uj"("("("5 w 2 hBKabL 1οGQv-d("/3/k!"("/{Z"G7n(.Idzps~àa"4c.U=; tƪt-ſ/BB=}Y?)ſ6Ok3~xkp~F뷵7T_iQ]w^{mQGUOB2BVn(4ƲV8!,rD &aA4- )mXEiblzkws};wx޼}uvf̝9s;XR$t^'|$sO!~+?sE@v -wB I(uGI¶!HuzB'| ۆ!mIV?3k/4ڈvFf%F nZnR1d;s9~g['s|8?qZI+H@ RѿI2i|@%` zu\ @>|"C6d𣿿_N @ @%olBʀFi%A=  $j 0> 1񁻴>immq)iʧ?i23n2Q|6FwTV34F~VY?9[lrOȞ=.u^2ظ1yy#*gu;3?|)բG8Oqܬ0//# C|U-OE=&E7R^AR2KC mSpϊUk *\2l,Bl܎vrwt>Q*F۱VP;Cfr'}d#P8nV?}>~8S?Os-PsZhʠu#h?PRy[Tug=r*ēb9S@_ /@ @ P;2'&-wOrFMOm&k*sU Tl0-9*SJbSO7違_ӼlT}AV;c6+_RW,\,_lp|_ȯ]ԫ~O>ߝo;j=󻻻kٴ "}E *L Ǐˇ~X*QW/Tlfݯba+*UbK!ޭo*V*>Yº|!r|GUz58d=zt@=/Kt:>p$AMj@Rѣ#Ɓ7."?&Y6X)jLA)iG<%uͼM*6g{4$[3d۶T'ayCQ٭[?&w01A~R)+dڴzf7?4 }~yY{|8}G׿~0XśnmT`ݺr- gg/zr.>F>BC~*6?ߞYγ!sd_h7h돣"}}-VO=^_2[{7iƿWf*i3afB/9fCJg:ٻ,X\utyr^HOϠ/9C?Xn ;Bnm'?[_|pXAYx.A#2W_=]na ܊f| tdʱ2ǎ @s74ӹNEf ?a勵/o >lV}}׾2S*d?|8?qoi:>"R(L˗z o ?E`kB6}@UV،VdA\{K]^LM;Tl6<%ߧb %?_ʟXS܎> cJ˖5Ν =qyH}|ˢW/9Zǖ{#g]|qs4`h6;ܠoFlm.k۷? 
B =G.d1X'+W6Gmu-IZPN8t_>@2jŋG_ϋP;CG |J),Yo:}:dYnŵU#N-϶Q姹KGTw'grǶyHS?Rgs/t; @٫S~l߾B}@G]q7V]'u(5v_Ki m[ȧ?=vT3_ܾ,[@Hjl'm @t}ditvvj ^?;Ź!=|fn889>@qY:r.k_s G|5Y{QgɤhfmҘj=okkZ )1/T3m]i֬6gGBb̟$#ZU-_ 2^CʧG#c6+k_ku)[>@qyi4b[Bۃ~GO2DmhhKuE'Io&M-(hΧ&߱qO?}M7)[60nǐ]/m>ǵ~֖}CJoWhBܸy'[>N[}g?G@NV۶hퟬ %`YfUoi @~f(Qq%ߴiӤe8^ ' @`\ 3"d $@eٌC @ @ G*Z ?PG&a&~ѣnBMji暴U=mQn}J?ԞY> >q9?)O}k٦ZB~ʧ~yvꒅ : GAQ{BB73ۂ ֒SFGY c(g-BZhCw>+Wx 1+>>}mC66_8t˪UkDې_BcP>dG EI_m~VlU`>w }K8ٕ3Z~%+w |kEȎ|[ +m>ž@2_?G CV?GY OP |&D_^]pNIg,@`٭ӳo*0E @ @@~gΜ)'O!ixAM٪jZ5f6;:w琜[$@@{777'#@& {L: 0id>7{l9uTUzm+*ש\P3&y~/*kE2XQ /d "A@5kV7A Oj@U ڲ5*;Tn@H"%eWVT6Y ݻd}Yn[!ozBO'?9m{ @ L*u/>imѤVOiD n,8OدW#́> ?> |!? ?\-i\hQPtGV\'SC|IҶ3T(|9>@qY-pJ9p@aΰ!?Wͧ!n#O)~߿?ZժUP9diyl dpŕ|B =( @@ _FPA @ @:u477W @JȊ@'`rǜBT@M۵(Ǿ9iΝ+T=(2Q|?G8Oqܬ!i+޼yRRO9TGn> 8S??mmmщ }[aMCv ?2_!i[GqQB}B˧=/7mC~u"qqd+l^g_J'T|9>m{o|,ƯG t>Lź/6KzBE |9>@qY-hb6iMO'||0}g< IZ`IENDB`vhost-0.10.0/src/backend.rs000064400000000000000000000552601046102023000136010ustar 00000000000000// Copyright (C) 2019-2021 Alibaba Cloud. All rights reserved. // SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause // // Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Portions Copyright 2017 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD-Google file. //! Common traits and structs for vhost-kern and vhost-user backend drivers. use std::cell::RefCell; use std::os::unix::io::AsRawFd; use std::os::unix::io::RawFd; use std::sync::RwLock; use vm_memory::{bitmap::Bitmap, Address, GuestMemoryRegion, GuestRegionMmap}; use vmm_sys_util::eventfd::EventFd; #[cfg(feature = "vhost-user")] use super::vhost_user::message::{VhostUserMemoryRegion, VhostUserSingleMemoryRegion}; use super::{Error, Result}; /// Maximum number of memory regions supported. pub const VHOST_MAX_MEMORY_REGIONS: usize = 255; /// Vring configuration data. 
#[derive(Default, Clone, Copy)] pub struct VringConfigData { /// Maximum queue size supported by the driver. pub queue_max_size: u16, /// Actual queue size negotiated by the driver. pub queue_size: u16, /// Bitmask of vring flags. pub flags: u32, /// Descriptor table address. pub desc_table_addr: u64, /// Used ring buffer address. pub used_ring_addr: u64, /// Available ring buffer address. pub avail_ring_addr: u64, /// Optional address for logging. pub log_addr: Option, } impl VringConfigData { /// Check whether the log (flag, address) pair is valid. pub fn is_log_addr_valid(&self) -> bool { if self.flags & 0x1 != 0 && self.log_addr.is_none() { return false; } true } /// Get the log address, default to zero if not available. pub fn get_log_addr(&self) -> u64 { if self.flags & 0x1 != 0 && self.log_addr.is_some() { self.log_addr.unwrap() } else { 0 } } } /// Memory region configuration data. #[derive(Default, Clone, Copy)] pub struct VhostUserMemoryRegionInfo { /// Guest physical address of the memory region. pub guest_phys_addr: u64, /// Size of the memory region. pub memory_size: u64, /// Virtual address in the current process. pub userspace_addr: u64, /// Optional offset where region starts in the mapped memory. pub mmap_offset: u64, /// Optional file descriptor for mmap. pub mmap_handle: RawFd, #[cfg(feature = "xen")] /// Xen specific flags. pub xen_mmap_flags: u32, #[cfg(feature = "xen")] /// Xen specific data. pub xen_mmap_data: u32, } impl VhostUserMemoryRegionInfo { /// Creates Self from GuestRegionMmap. 
pub fn from_guest_region(region: &GuestRegionMmap) -> Result { let file_offset = region .file_offset() .ok_or(Error::InvalidGuestMemoryRegion)?; Ok(Self { guest_phys_addr: region.start_addr().raw_value(), memory_size: region.len(), userspace_addr: region.as_ptr() as u64, mmap_offset: file_offset.start(), mmap_handle: file_offset.file().as_raw_fd(), #[cfg(feature = "xen")] xen_mmap_flags: region.xen_mmap_flags(), #[cfg(feature = "xen")] xen_mmap_data: region.xen_mmap_data(), }) } /// Creates VhostUserMemoryRegion from Self. #[cfg(feature = "vhost-user")] pub fn to_region(&self) -> VhostUserMemoryRegion { #[cfg(not(feature = "xen"))] return VhostUserMemoryRegion::new( self.guest_phys_addr, self.memory_size, self.userspace_addr, self.mmap_offset, ); #[cfg(feature = "xen")] VhostUserMemoryRegion::with_xen( self.guest_phys_addr, self.memory_size, self.userspace_addr, self.mmap_offset, self.xen_mmap_flags, self.xen_mmap_data, ) } /// Creates VhostUserSingleMemoryRegion from Self. #[cfg(feature = "vhost-user")] pub fn to_single_region(&self) -> VhostUserSingleMemoryRegion { VhostUserSingleMemoryRegion::new( self.guest_phys_addr, self.memory_size, self.userspace_addr, self.mmap_offset, #[cfg(feature = "xen")] self.xen_mmap_flags, #[cfg(feature = "xen")] self.xen_mmap_data, ) } } /// Shared memory region data for logging dirty pages #[derive(Default, Clone, Copy)] pub struct VhostUserDirtyLogRegion { /// Size of the shared memory region for logging dirty pages pub mmap_size: u64, /// Offset where region starts pub mmap_offset: u64, /// File descriptor for mmap pub mmap_handle: RawFd, } /// Vhost memory access permission (VHOST_ACCESS_* mapping) #[repr(u8)] #[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord)] pub enum VhostAccess { /// No access. #[default] No = 0, /// Read-Only access. ReadOnly = 1, /// Write-Only access. WriteOnly = 2, /// Read and Write access. 
ReadWrite = 3, } /// Vhost IOTLB message type (VHOST_IOTLB_* mapping) #[repr(u8)] #[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord)] pub enum VhostIotlbType { /// Empty message (not valid). #[default] Empty = 0, /// I/O virtual address mapping is missing or invalidated. Miss = 1, /// Update the I/O virtual address mapping. Update = 2, /// Invalidate the I/O virtual address mapping. Invalidate = 3, /// Access failed to an I/O virtual address. AccessFail = 4, /// Batch of multiple `Update` messages begins. BatchBegin = 5, /// Batch of multiple `Update` messages ends. BatchEnd = 6, } /// Vhost IOTLB message structure. #[derive(Default, Clone, Copy)] pub struct VhostIotlbMsg { /// I/O virtual address. pub iova: u64, /// Size of the I/O mapping. pub size: u64, /// Virtual address in the current process. pub userspace_addr: u64, /// Access permissions. pub perm: VhostAccess, /// Type of the message. pub msg_type: VhostIotlbType, } /// Vhost IOTLB message parser. pub trait VhostIotlbMsgParser { /// Parse the IOTLB message and fill a VhostIotlbMsg. /// /// # Arguments /// * `msg` - IOTLB message parsed. fn parse(&self, msg: &mut VhostIotlbMsg) -> Result<()>; } /// An interface for IOTLB messages support for vhost-based backend pub trait VhostIotlbBackend: std::marker::Sized { /// Send an IOTLB message to the vhost-based backend. /// /// # Arguments /// * `msg` - IOTLB message to send. fn send_iotlb_msg(&self, msg: &VhostIotlbMsg) -> Result<()>; } /// An interface for setting up vhost-based backend drivers with interior mutability. /// /// Vhost devices are subset of virtio devices, which improve virtio device's performance by /// delegating data plane operations to dedicated IO service processes. Vhost devices use the /// same virtqueue layout as virtio devices to allow vhost devices to be mapped directly to /// virtio devices. /// /// The purpose of vhost is to implement a subset of a virtio device's functionality outside the /// VMM process. 
Typically fast paths for IO operations are delegated to the dedicated IO service /// processes, and slow path for device configuration are still handled by the VMM process. It may /// also be used to control access permissions of virtio backend devices. pub trait VhostBackend: std::marker::Sized { /// Get a bitmask of supported virtio/vhost features. fn get_features(&self) -> Result; /// Inform the vhost subsystem which features to enable. /// This should be a subset of supported features from get_features(). /// /// # Arguments /// * `features` - Bitmask of features to set. fn set_features(&self, features: u64) -> Result<()>; /// Set the current process as the owner of the vhost backend. /// This must be run before any other vhost commands. fn set_owner(&self) -> Result<()>; /// Used to be sent to request disabling all rings /// This is no longer used. fn reset_owner(&self) -> Result<()>; /// Set the guest memory mappings for vhost to use. fn set_mem_table(&self, regions: &[VhostUserMemoryRegionInfo]) -> Result<()>; /// Set base address for page modification logging. fn set_log_base(&self, base: u64, region: Option) -> Result<()>; /// Specify an eventfd file descriptor to signal on log write. fn set_log_fd(&self, fd: RawFd) -> Result<()>; /// Set the number of descriptors in the vring. /// /// # Arguments /// * `queue_index` - Index of the queue to set descriptor count for. /// * `num` - Number of descriptors in the queue. fn set_vring_num(&self, queue_index: usize, num: u16) -> Result<()>; /// Set the addresses for a given vring. /// /// # Arguments /// * `queue_index` - Index of the queue to set addresses for. /// * `config_data` - Configuration data for a vring. fn set_vring_addr(&self, queue_index: usize, config_data: &VringConfigData) -> Result<()>; /// Set the first index to look for available descriptors. /// /// # Arguments /// * `queue_index` - Index of the queue to modify. /// * `num` - Index where available descriptors start. 
fn set_vring_base(&self, queue_index: usize, base: u16) -> Result<()>; /// Get the available vring base offset. fn get_vring_base(&self, queue_index: usize) -> Result; /// Set the eventfd to trigger when buffers have been used by the host. /// /// # Arguments /// * `queue_index` - Index of the queue to modify. /// * `fd` - EventFd to trigger. fn set_vring_call(&self, queue_index: usize, fd: &EventFd) -> Result<()>; /// Set the eventfd that will be signaled by the guest when buffers are /// available for the host to process. /// /// # Arguments /// * `queue_index` - Index of the queue to modify. /// * `fd` - EventFd that will be signaled from guest. fn set_vring_kick(&self, queue_index: usize, fd: &EventFd) -> Result<()>; /// Set the eventfd that will be signaled by the guest when error happens. /// /// # Arguments /// * `queue_index` - Index of the queue to modify. /// * `fd` - EventFd that will be signaled from guest. fn set_vring_err(&self, queue_index: usize, fd: &EventFd) -> Result<()>; } /// An interface for setting up vhost-based backend drivers. /// /// Vhost devices are subset of virtio devices, which improve virtio device's performance by /// delegating data plane operations to dedicated IO service processes. Vhost devices use the /// same virtqueue layout as virtio devices to allow vhost devices to be mapped directly to /// virtio devices. /// /// The purpose of vhost is to implement a subset of a virtio device's functionality outside the /// VMM process. Typically fast paths for IO operations are delegated to the dedicated IO service /// processes, and slow path for device configuration are still handled by the VMM process. It may /// also be used to control access permissions of virtio backend devices. pub trait VhostBackendMut: std::marker::Sized { /// Get a bitmask of supported virtio/vhost features. fn get_features(&mut self) -> Result; /// Inform the vhost subsystem which features to enable. 
/// This should be a subset of supported features from get_features(). /// /// # Arguments /// * `features` - Bitmask of features to set. fn set_features(&mut self, features: u64) -> Result<()>; /// Set the current process as the owner of the vhost backend. /// This must be run before any other vhost commands. fn set_owner(&mut self) -> Result<()>; /// Used to be sent to request disabling all rings /// This is no longer used. fn reset_owner(&mut self) -> Result<()>; /// Set the guest memory mappings for vhost to use. fn set_mem_table(&mut self, regions: &[VhostUserMemoryRegionInfo]) -> Result<()>; /// Set base address for page modification logging. fn set_log_base(&mut self, base: u64, region: Option) -> Result<()>; /// Specify an eventfd file descriptor to signal on log write. fn set_log_fd(&mut self, fd: RawFd) -> Result<()>; /// Set the number of descriptors in the vring. /// /// # Arguments /// * `queue_index` - Index of the queue to set descriptor count for. /// * `num` - Number of descriptors in the queue. fn set_vring_num(&mut self, queue_index: usize, num: u16) -> Result<()>; /// Set the addresses for a given vring. /// /// # Arguments /// * `queue_index` - Index of the queue to set addresses for. /// * `config_data` - Configuration data for a vring. fn set_vring_addr(&mut self, queue_index: usize, config_data: &VringConfigData) -> Result<()>; /// Set the first index to look for available descriptors. /// /// # Arguments /// * `queue_index` - Index of the queue to modify. /// * `num` - Index where available descriptors start. fn set_vring_base(&mut self, queue_index: usize, base: u16) -> Result<()>; /// Get the available vring base offset. fn get_vring_base(&mut self, queue_index: usize) -> Result; /// Set the eventfd to trigger when buffers have been used by the host. /// /// # Arguments /// * `queue_index` - Index of the queue to modify. /// * `fd` - EventFd to trigger. 
fn set_vring_call(&mut self, queue_index: usize, fd: &EventFd) -> Result<()>; /// Set the eventfd that will be signaled by the guest when buffers are /// available for the host to process. /// /// # Arguments /// * `queue_index` - Index of the queue to modify. /// * `fd` - EventFd that will be signaled from guest. fn set_vring_kick(&mut self, queue_index: usize, fd: &EventFd) -> Result<()>; /// Set the eventfd that will be signaled by the guest when error happens. /// /// # Arguments /// * `queue_index` - Index of the queue to modify. /// * `fd` - EventFd that will be signaled from guest. fn set_vring_err(&mut self, queue_index: usize, fd: &EventFd) -> Result<()>; } impl VhostBackend for RwLock { fn get_features(&self) -> Result { self.write().unwrap().get_features() } fn set_features(&self, features: u64) -> Result<()> { self.write().unwrap().set_features(features) } fn set_owner(&self) -> Result<()> { self.write().unwrap().set_owner() } fn reset_owner(&self) -> Result<()> { self.write().unwrap().reset_owner() } fn set_mem_table(&self, regions: &[VhostUserMemoryRegionInfo]) -> Result<()> { self.write().unwrap().set_mem_table(regions) } fn set_log_base(&self, base: u64, region: Option) -> Result<()> { self.write().unwrap().set_log_base(base, region) } fn set_log_fd(&self, fd: RawFd) -> Result<()> { self.write().unwrap().set_log_fd(fd) } fn set_vring_num(&self, queue_index: usize, num: u16) -> Result<()> { self.write().unwrap().set_vring_num(queue_index, num) } fn set_vring_addr(&self, queue_index: usize, config_data: &VringConfigData) -> Result<()> { self.write() .unwrap() .set_vring_addr(queue_index, config_data) } fn set_vring_base(&self, queue_index: usize, base: u16) -> Result<()> { self.write().unwrap().set_vring_base(queue_index, base) } fn get_vring_base(&self, queue_index: usize) -> Result { self.write().unwrap().get_vring_base(queue_index) } fn set_vring_call(&self, queue_index: usize, fd: &EventFd) -> Result<()> { 
self.write().unwrap().set_vring_call(queue_index, fd) } fn set_vring_kick(&self, queue_index: usize, fd: &EventFd) -> Result<()> { self.write().unwrap().set_vring_kick(queue_index, fd) } fn set_vring_err(&self, queue_index: usize, fd: &EventFd) -> Result<()> { self.write().unwrap().set_vring_err(queue_index, fd) } } impl VhostBackend for RefCell { fn get_features(&self) -> Result { self.borrow_mut().get_features() } fn set_features(&self, features: u64) -> Result<()> { self.borrow_mut().set_features(features) } fn set_owner(&self) -> Result<()> { self.borrow_mut().set_owner() } fn reset_owner(&self) -> Result<()> { self.borrow_mut().reset_owner() } fn set_mem_table(&self, regions: &[VhostUserMemoryRegionInfo]) -> Result<()> { self.borrow_mut().set_mem_table(regions) } fn set_log_base(&self, base: u64, region: Option) -> Result<()> { self.borrow_mut().set_log_base(base, region) } fn set_log_fd(&self, fd: RawFd) -> Result<()> { self.borrow_mut().set_log_fd(fd) } fn set_vring_num(&self, queue_index: usize, num: u16) -> Result<()> { self.borrow_mut().set_vring_num(queue_index, num) } fn set_vring_addr(&self, queue_index: usize, config_data: &VringConfigData) -> Result<()> { self.borrow_mut().set_vring_addr(queue_index, config_data) } fn set_vring_base(&self, queue_index: usize, base: u16) -> Result<()> { self.borrow_mut().set_vring_base(queue_index, base) } fn get_vring_base(&self, queue_index: usize) -> Result { self.borrow_mut().get_vring_base(queue_index) } fn set_vring_call(&self, queue_index: usize, fd: &EventFd) -> Result<()> { self.borrow_mut().set_vring_call(queue_index, fd) } fn set_vring_kick(&self, queue_index: usize, fd: &EventFd) -> Result<()> { self.borrow_mut().set_vring_kick(queue_index, fd) } fn set_vring_err(&self, queue_index: usize, fd: &EventFd) -> Result<()> { self.borrow_mut().set_vring_err(queue_index, fd) } } #[cfg(any(test, feature = "test-utils"))] impl VhostUserMemoryRegionInfo { /// creates instance of `VhostUserMemoryRegionInfo`. 
pub fn new( guest_phys_addr: u64, memory_size: u64, userspace_addr: u64, mmap_offset: u64, mmap_handle: RawFd, ) -> Self { Self { guest_phys_addr, memory_size, userspace_addr, mmap_offset, mmap_handle, #[cfg(feature = "xen")] xen_mmap_flags: vm_memory::MmapXenFlags::UNIX.bits(), #[cfg(feature = "xen")] xen_mmap_data: 0, } } } #[cfg(test)] mod tests { use super::*; struct MockBackend {} impl VhostBackendMut for MockBackend { fn get_features(&mut self) -> Result { Ok(0x1) } fn set_features(&mut self, features: u64) -> Result<()> { assert_eq!(features, 0x1); Ok(()) } fn set_owner(&mut self) -> Result<()> { Ok(()) } fn reset_owner(&mut self) -> Result<()> { Ok(()) } fn set_mem_table(&mut self, _regions: &[VhostUserMemoryRegionInfo]) -> Result<()> { Ok(()) } fn set_log_base( &mut self, base: u64, region: Option, ) -> Result<()> { assert_eq!(base, 0x100); let region = region.unwrap(); assert_eq!(region.mmap_size, 0x1000); assert_eq!(region.mmap_offset, 0x10); assert_eq!(region.mmap_handle, 100); Ok(()) } fn set_log_fd(&mut self, fd: RawFd) -> Result<()> { assert_eq!(fd, 100); Ok(()) } fn set_vring_num(&mut self, queue_index: usize, num: u16) -> Result<()> { assert_eq!(queue_index, 1); assert_eq!(num, 256); Ok(()) } fn set_vring_addr( &mut self, queue_index: usize, _config_data: &VringConfigData, ) -> Result<()> { assert_eq!(queue_index, 1); Ok(()) } fn set_vring_base(&mut self, queue_index: usize, base: u16) -> Result<()> { assert_eq!(queue_index, 1); assert_eq!(base, 2); Ok(()) } fn get_vring_base(&mut self, queue_index: usize) -> Result { assert_eq!(queue_index, 1); Ok(2) } fn set_vring_call(&mut self, queue_index: usize, _fd: &EventFd) -> Result<()> { assert_eq!(queue_index, 1); Ok(()) } fn set_vring_kick(&mut self, queue_index: usize, _fd: &EventFd) -> Result<()> { assert_eq!(queue_index, 1); Ok(()) } fn set_vring_err(&mut self, queue_index: usize, _fd: &EventFd) -> Result<()> { assert_eq!(queue_index, 1); Ok(()) } } #[test] fn test_vring_backend_mut() { let b = 
RwLock::new(MockBackend {}); assert_eq!(b.get_features().unwrap(), 0x1); b.set_features(0x1).unwrap(); b.set_owner().unwrap(); b.reset_owner().unwrap(); b.set_mem_table(&[]).unwrap(); b.set_log_base( 0x100, Some(VhostUserDirtyLogRegion { mmap_size: 0x1000, mmap_offset: 0x10, mmap_handle: 100, }), ) .unwrap(); b.set_log_fd(100).unwrap(); b.set_vring_num(1, 256).unwrap(); let config = VringConfigData { queue_max_size: 0x1000, queue_size: 0x2000, flags: 0x0, desc_table_addr: 0x4000, used_ring_addr: 0x5000, avail_ring_addr: 0x6000, log_addr: None, }; b.set_vring_addr(1, &config).unwrap(); b.set_vring_base(1, 2).unwrap(); assert_eq!(b.get_vring_base(1).unwrap(), 2); let eventfd = EventFd::new(0).unwrap(); b.set_vring_call(1, &eventfd).unwrap(); b.set_vring_kick(1, &eventfd).unwrap(); b.set_vring_err(1, &eventfd).unwrap(); } #[test] fn test_vring_config_data() { let mut config = VringConfigData { queue_max_size: 0x1000, queue_size: 0x2000, flags: 0x0, desc_table_addr: 0x4000, used_ring_addr: 0x5000, avail_ring_addr: 0x6000, log_addr: None, }; assert!(config.is_log_addr_valid()); assert_eq!(config.get_log_addr(), 0); config.flags = 0x1; assert!(!config.is_log_addr_valid()); assert_eq!(config.get_log_addr(), 0); config.log_addr = Some(0x7000); assert!(config.is_log_addr_valid()); assert_eq!(config.get_log_addr(), 0x7000); config.flags = 0x0; assert!(config.is_log_addr_valid()); assert_eq!(config.get_log_addr(), 0); } } vhost-0.10.0/src/lib.rs000064400000000000000000000151321046102023000127520ustar 00000000000000// Copyright (C) 2019 Alibaba Cloud. All rights reserved. // SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause //! Virtio Vhost Backend Drivers //! //! Virtio devices use virtqueues to transport data efficiently. The first generation of virtqueue //! is a set of three different single-producer, single-consumer ring structures designed to store //! generic scatter-gather I/O. The virtio specification 1.1 introduces an alternative compact //! 
virtqueue layout named "Packed Virtqueue", which is more friendly to memory cache system and //! hardware implemented virtio devices. The packed virtqueue uses read-write memory, that means //! the memory will be both read and written by both host and guest. The new Packed Virtqueue is //! preferred for performance. //! //! Vhost is a mechanism to improve performance of Virtio devices by delegate data plane operations //! to dedicated IO service processes. Only the configuration, I/O submission notification, and I/O //! completion interruption are piped through the hypervisor. //! It uses the same virtqueue layout as Virtio to allow Vhost devices to be mapped directly to //! Virtio devices. This allows a Vhost device to be accessed directly by a guest OS inside a //! hypervisor process with an existing Virtio (PCI) driver. //! //! The initial vhost implementation is a part of the Linux kernel and uses ioctl interface to //! communicate with userspace applications. Dedicated kernel worker threads are created to handle //! IO requests from the guest. //! //! Later Vhost-user protocol is introduced to complement the ioctl interface used to control the //! vhost implementation in the Linux kernel. It implements the control plane needed to establish //! virtqueues sharing with a user space process on the same host. It uses communication over a //! Unix domain socket to share file descriptors in the ancillary data of the message. //! The protocol defines 2 sides of the communication, frontend and backend. Frontend is the application //! that shares its virtqueues. Backend is the consumer of the virtqueues. Frontend and backend can be //! either a client (i.e. connecting) or server (listening) in the socket communication. 
#![deny(missing_docs)] #[cfg_attr(feature = "vhost-user", macro_use)] extern crate bitflags; #[cfg_attr(feature = "vhost-kern", macro_use)] extern crate vmm_sys_util; mod backend; pub use backend::*; #[cfg(feature = "vhost-net")] pub mod net; #[cfg(feature = "vhost-vdpa")] pub mod vdpa; #[cfg(feature = "vhost-kern")] pub mod vhost_kern; #[cfg(feature = "vhost-user")] pub mod vhost_user; #[cfg(feature = "vhost-vsock")] pub mod vsock; /// Error codes for vhost operations #[derive(Debug)] pub enum Error { /// Invalid operations. InvalidOperation, /// Invalid guest memory. InvalidGuestMemory, /// Invalid guest memory region. InvalidGuestMemoryRegion, /// Invalid IOTLB message. InvalidIotlbMsg, /// Invalid queue. InvalidQueue, /// Invalid descriptor table address. DescriptorTableAddress, /// Invalid used address. UsedAddress, /// Invalid available address. AvailAddress, /// Invalid log address. LogAddress, #[cfg(feature = "vhost-kern")] /// Error opening the vhost backend driver. VhostOpen(std::io::Error), #[cfg(feature = "vhost-kern")] /// Error while running ioctl. IoctlError(std::io::Error), /// Error from IO subsystem. IOError(std::io::Error), #[cfg(feature = "vhost-user")] /// Error from the vhost-user subsystem. 
VhostUserProtocol(vhost_user::Error), } impl std::fmt::Display for Error { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self { Error::InvalidOperation => write!(f, "invalid vhost operations"), Error::InvalidGuestMemory => write!(f, "invalid guest memory object"), Error::InvalidGuestMemoryRegion => write!(f, "invalid guest memory region"), Error::InvalidIotlbMsg => write!(f, "invalid IOTLB message"), Error::InvalidQueue => write!(f, "invalid virtqueue"), Error::DescriptorTableAddress => { write!(f, "invalid virtqueue descriptor table address") } Error::UsedAddress => write!(f, "invalid virtqueue used table address"), Error::AvailAddress => write!(f, "invalid virtqueue available table address"), Error::LogAddress => write!(f, "invalid virtqueue log address"), Error::IOError(e) => write!(f, "IO error: {}", e), #[cfg(feature = "vhost-kern")] Error::VhostOpen(e) => write!(f, "failure in opening vhost file: {}", e), #[cfg(feature = "vhost-kern")] Error::IoctlError(e) => write!(f, "failure in vhost ioctl: {}", e), #[cfg(feature = "vhost-user")] Error::VhostUserProtocol(e) => write!(f, "vhost-user: {}", e), } } } impl std::error::Error for Error {} #[cfg(feature = "vhost-user")] impl std::convert::From for Error { fn from(err: vhost_user::Error) -> Self { Error::VhostUserProtocol(err) } } /// Result of vhost operations pub type Result = std::result::Result; #[cfg(test)] mod tests { use super::*; #[test] fn test_error() { assert_eq!( format!("{}", Error::AvailAddress), "invalid virtqueue available table address" ); assert_eq!( format!("{}", Error::InvalidOperation), "invalid vhost operations" ); assert_eq!( format!("{}", Error::InvalidGuestMemory), "invalid guest memory object" ); assert_eq!( format!("{}", Error::InvalidGuestMemoryRegion), "invalid guest memory region" ); assert_eq!( format!("{}", Error::InvalidIotlbMsg), "invalid IOTLB message" ); assert_eq!(format!("{}", Error::InvalidQueue), "invalid virtqueue"); assert_eq!( format!("{}", 
Error::DescriptorTableAddress), "invalid virtqueue descriptor table address" ); assert_eq!( format!("{}", Error::UsedAddress), "invalid virtqueue used table address" ); assert_eq!( format!("{}", Error::LogAddress), "invalid virtqueue log address" ); assert_eq!(format!("{:?}", Error::AvailAddress), "AvailAddress"); } #[cfg(feature = "vhost-user")] #[test] fn test_convert_from_vhost_user_error() { let e: Error = vhost_user::Error::OversizedMsg.into(); assert_eq!(format!("{}", e), "vhost-user: oversized message"); } } vhost-0.10.0/src/net.rs000064400000000000000000000011161046102023000127670ustar 00000000000000// Copyright (C) 2021 Alibaba Cloud Computing. All rights reserved. // SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause //! Trait to control vhost-net backend drivers. use std::fs::File; use crate::backend::VhostBackend; use crate::Result; /// Trait to control vhost-net backend drivers. pub trait VhostNet: VhostBackend { /// Set fd as VHOST_NET backend. /// /// # Arguments /// * `queue_index` - Index of the virtqueue /// * `fd` - The file descriptor which servers as the backend fn set_backend(&self, queue_idx: usize, fd: Option<&File>) -> Result<()>; } vhost-0.10.0/src/vdpa.rs000064400000000000000000000112531046102023000131360ustar 00000000000000// Copyright (C) 2021 Red Hat, Inc. All rights reserved. // SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause //! Trait to control vhost-vdpa backend drivers. use vmm_sys_util::eventfd::EventFd; use crate::backend::VhostBackend; use crate::Result; /// vhost vdpa IOVA range pub struct VhostVdpaIovaRange { /// First address that can be mapped by vhost-vDPA. pub first: u64, /// Last address that can be mapped by vhost-vDPA. pub last: u64, } /// Trait to control vhost-vdpa backend drivers. /// /// vDPA (virtio Data Path Acceleration) devices has datapath compliant with the /// virtio specification and the control path is vendor specific. 
/// vDPA devices can be both physically located on the hardware or emulated /// by software. /// /// Compared to vhost acceleration, vDPA offers more control over the device /// lifecycle. /// For this reason, the vhost-vdpa interface extends the vhost API, offering /// additional APIs for controlling the device (e.g. changing the state or /// accessing the configuration space pub trait VhostVdpa: VhostBackend { /// Get the device id. /// The device ids follow the same definition of the device id defined in virtio-spec. fn get_device_id(&self) -> Result; /// Get the status. /// The status bits follow the same definition of the device status defined in virtio-spec. fn get_status(&self) -> Result; /// Set the status. /// The status bits follow the same definition of the device status defined in virtio-spec. /// /// # Arguments /// * `status` - Status bits to set fn set_status(&self, status: u8) -> Result<()>; /// Get the device configuration. /// /// # Arguments /// * `offset` - Offset in the device configuration space /// * `buffer` - Buffer for configuration data fn get_config(&self, offset: u32, buffer: &mut [u8]) -> Result<()>; /// Set the device configuration. /// /// # Arguments /// * `offset` - Offset in the device configuration space /// * `buffer` - Buffer for configuration data fn set_config(&self, offset: u32, buffer: &[u8]) -> Result<()>; /// Set the status for a given vring. /// /// # Arguments /// * `queue_index` - Index of the queue to enable/disable. /// * `enabled` - true to enable the vring, false to disable it. fn set_vring_enable(&self, queue_index: usize, enabled: bool) -> Result<()>; /// Get the maximum number of descriptors in the vring supported by the device. fn get_vring_num(&self) -> Result; /// Set the eventfd to trigger when device configuration change. /// /// # Arguments /// * `fd` - EventFd to trigger. fn set_config_call(&self, fd: &EventFd) -> Result<()>; /// Get the valid I/O virtual addresses range supported by the device. 
fn get_iova_range(&self) -> Result; /// Get the config size fn get_config_size(&self) -> Result; /// Get the count of all virtqueues fn get_vqs_count(&self) -> Result; /// Get the number of virtqueue groups fn get_group_num(&self) -> Result; /// Get the number of address spaces fn get_as_num(&self) -> Result; /// Get the group for a virtqueue. /// The virtqueue index is stored in the index field of /// vhost_vring_state. The group for this specific virtqueue is /// returned via num field of vhost_vring_state. fn get_vring_group(&self, queue_index: u32) -> Result; /// Set the ASID for a virtqueue group. The group index is stored in /// the index field of vhost_vring_state, the ASID associated with this /// group is stored at num field of vhost_vring_state. fn set_group_asid(&self, group_index: u32, asid: u32) -> Result<()>; /// Suspend a device so it does not process virtqueue requests anymore /// /// After the return of ioctl the device must preserve all the necessary state /// (the virtqueue vring base plus the possible device specific states) that is /// required for restoring in the future. The device must not change its /// configuration after that point. fn suspend(&self) -> Result<()>; /// Map DMA region. /// /// # Arguments /// * `iova` - I/O virtual address. /// * `size` - Size of the I/O mapping. /// * `vaddr` - Virtual address in the current process. /// * `readonly` - true if the region is read-only, false if reads and writes are allowed. fn dma_map(&self, iova: u64, size: u64, vaddr: *const u8, readonly: bool) -> Result<()>; /// Unmap DMA region. /// /// # Arguments /// * `iova` - I/O virtual address. /// * `size` - Size of the I/O mapping. fn dma_unmap(&self, iova: u64, size: u64) -> Result<()>; } vhost-0.10.0/src/vhost_kern/mod.rs000064400000000000000000000425371046102023000151560ustar 00000000000000// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved. 
// SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause // // Portions Copyright 2017 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD-Google file. //! Traits and structs to control Linux in-kernel vhost drivers. //! //! The initial vhost implementation is a part of the Linux kernel and uses ioctl interface to //! communicate with userspace applications. This sub module provides ioctl based interfaces to //! control the in-kernel net, scsi, vsock vhost drivers. use std::mem; use std::os::unix::io::{AsRawFd, RawFd}; use libc::{c_void, ssize_t, write}; use vm_memory::{Address, GuestAddress, GuestAddressSpace, GuestMemory, GuestUsize}; use vmm_sys_util::eventfd::EventFd; use vmm_sys_util::ioctl::{ioctl, ioctl_with_mut_ref, ioctl_with_ptr, ioctl_with_ref}; use super::{ Error, Result, VhostBackend, VhostIotlbBackend, VhostIotlbMsg, VhostIotlbMsgParser, VhostUserDirtyLogRegion, VhostUserMemoryRegionInfo, VringConfigData, VHOST_MAX_MEMORY_REGIONS, }; pub mod vhost_binding; use self::vhost_binding::*; #[cfg(feature = "vhost-net")] pub mod net; #[cfg(feature = "vhost-vdpa")] pub mod vdpa; #[cfg(feature = "vhost-vsock")] pub mod vsock; #[inline] fn ioctl_result(rc: i32, res: T) -> Result { if rc < 0 { Err(Error::IoctlError(std::io::Error::last_os_error())) } else { Ok(res) } } #[inline] fn io_result(rc: isize, res: T) -> Result { if rc < 0 { Err(Error::IOError(std::io::Error::last_os_error())) } else { Ok(res) } } /// Represent an in-kernel vhost device backend. pub trait VhostKernBackend: AsRawFd { /// Associated type to access guest memory. type AS: GuestAddressSpace; /// Get the object to access the guest's memory. fn mem(&self) -> &Self::AS; /// Check whether the ring configuration is valid. 
fn is_valid(&self, config_data: &VringConfigData) -> bool { let queue_size = config_data.queue_size; if queue_size > config_data.queue_max_size || queue_size == 0 || (queue_size & (queue_size - 1)) != 0 { return false; } let m = self.mem().memory(); let desc_table_size = 16 * u64::from(queue_size) as GuestUsize; let avail_ring_size = 6 + 2 * u64::from(queue_size) as GuestUsize; let used_ring_size = 6 + 8 * u64::from(queue_size) as GuestUsize; if GuestAddress(config_data.desc_table_addr) .checked_add(desc_table_size) .map_or(true, |v| !m.address_in_range(v)) { return false; } if GuestAddress(config_data.avail_ring_addr) .checked_add(avail_ring_size) .map_or(true, |v| !m.address_in_range(v)) { return false; } if GuestAddress(config_data.used_ring_addr) .checked_add(used_ring_size) .map_or(true, |v| !m.address_in_range(v)) { return false; } config_data.is_log_addr_valid() } } impl VhostBackend for T { /// Get a bitmask of supported virtio/vhost features. fn get_features(&self) -> Result { let mut avail_features: u64 = 0; // SAFETY: This ioctl is called on a valid vhost fd and has its return value checked. let ret = unsafe { ioctl_with_mut_ref(self, VHOST_GET_FEATURES(), &mut avail_features) }; ioctl_result(ret, avail_features) } /// Inform the vhost subsystem which features to enable. This should be a subset of /// supported features from VHOST_GET_FEATURES. /// /// # Arguments /// * `features` - Bitmask of features to set. fn set_features(&self, features: u64) -> Result<()> { // SAFETY: This ioctl is called on a valid vhost fd and has its return value checked. let ret = unsafe { ioctl_with_ref(self, VHOST_SET_FEATURES(), &features) }; ioctl_result(ret, ()) } /// Set the current process as the owner of this file descriptor. /// This must be run before any other vhost ioctls. fn set_owner(&self) -> Result<()> { // SAFETY: This ioctl is called on a valid vhost fd and has its return value checked. 
let ret = unsafe { ioctl(self, VHOST_SET_OWNER()) }; ioctl_result(ret, ()) } fn reset_owner(&self) -> Result<()> { // SAFETY: This ioctl is called on a valid vhost fd and has its return value checked. let ret = unsafe { ioctl(self, VHOST_RESET_OWNER()) }; ioctl_result(ret, ()) } /// Set the guest memory mappings for vhost to use. fn set_mem_table(&self, regions: &[VhostUserMemoryRegionInfo]) -> Result<()> { if regions.is_empty() || regions.len() > VHOST_MAX_MEMORY_REGIONS { return Err(Error::InvalidGuestMemory); } let mut vhost_memory = VhostMemory::new(regions.len() as u16); for (index, region) in regions.iter().enumerate() { vhost_memory.set_region( index as u32, &vhost_memory_region { guest_phys_addr: region.guest_phys_addr, memory_size: region.memory_size, userspace_addr: region.userspace_addr, flags_padding: 0u64, }, )?; } // SAFETY: This ioctl is called with a pointer that is valid for the lifetime // of this function. The kernel will make its own copy of the memory // tables. As always, check the return value. let ret = unsafe { ioctl_with_ptr(self, VHOST_SET_MEM_TABLE(), vhost_memory.as_ptr()) }; ioctl_result(ret, ()) } /// Set base address for page modification logging. /// /// # Arguments /// * `base` - Base address for page modification logging. fn set_log_base(&self, base: u64, region: Option) -> Result<()> { if region.is_some() { return Err(Error::LogAddress); } // SAFETY: This ioctl is called on a valid vhost fd and has its return value checked. let ret = unsafe { ioctl_with_ref(self, VHOST_SET_LOG_BASE(), &base) }; ioctl_result(ret, ()) } /// Specify an eventfd file descriptor to signal on log write. fn set_log_fd(&self, fd: RawFd) -> Result<()> { let val: i32 = fd; // SAFETY: This ioctl is called on a valid vhost fd and has its return value checked. let ret = unsafe { ioctl_with_ref(self, VHOST_SET_LOG_FD(), &val) }; ioctl_result(ret, ()) } /// Set the number of descriptors in the vring. 
/// /// # Arguments /// * `queue_index` - Index of the queue to set descriptor count for. /// * `num` - Number of descriptors in the queue. fn set_vring_num(&self, queue_index: usize, num: u16) -> Result<()> { let vring_state = vhost_vring_state { index: queue_index as u32, num: u32::from(num), }; // SAFETY: This ioctl is called on a valid vhost fd and has its return value checked. let ret = unsafe { ioctl_with_ref(self, VHOST_SET_VRING_NUM(), &vring_state) }; ioctl_result(ret, ()) } /// Set the addresses for a given vring. /// /// # Arguments /// * `queue_index` - Index of the queue to set addresses for. /// * `config_data` - Vring config data, addresses of desc_table, avail_ring /// and used_ring are in the guest address space. fn set_vring_addr(&self, queue_index: usize, config_data: &VringConfigData) -> Result<()> { if !self.is_valid(config_data) { return Err(Error::InvalidQueue); } // The addresses are converted into the host address space. let vring_addr = config_data.to_vhost_vring_addr(queue_index, self.mem())?; // SAFETY: This ioctl is called on a valid vhost fd and has its // return value checked. let ret = unsafe { ioctl_with_ref(self, VHOST_SET_VRING_ADDR(), &vring_addr) }; ioctl_result(ret, ()) } /// Set the first index to look for available descriptors. /// /// # Arguments /// * `queue_index` - Index of the queue to modify. /// * `num` - Index where available descriptors start. fn set_vring_base(&self, queue_index: usize, base: u16) -> Result<()> { let vring_state = vhost_vring_state { index: queue_index as u32, num: u32::from(base), }; // SAFETY: This ioctl is called on a valid vhost fd and has its return value checked. let ret = unsafe { ioctl_with_ref(self, VHOST_SET_VRING_BASE(), &vring_state) }; ioctl_result(ret, ()) } /// Get a bitmask of supported virtio/vhost features. 
fn get_vring_base(&self, queue_index: usize) -> Result { let vring_state = vhost_vring_state { index: queue_index as u32, num: 0, }; // SAFETY: This ioctl is called on a valid vhost fd and has its return value checked. let ret = unsafe { ioctl_with_ref(self, VHOST_GET_VRING_BASE(), &vring_state) }; ioctl_result(ret, vring_state.num) } /// Set the eventfd to trigger when buffers have been used by the host. /// /// # Arguments /// * `queue_index` - Index of the queue to modify. /// * `fd` - EventFd to trigger. fn set_vring_call(&self, queue_index: usize, fd: &EventFd) -> Result<()> { let vring_file = vhost_vring_file { index: queue_index as u32, fd: fd.as_raw_fd(), }; // SAFETY: This ioctl is called on a valid vhost fd and has its return value checked. let ret = unsafe { ioctl_with_ref(self, VHOST_SET_VRING_CALL(), &vring_file) }; ioctl_result(ret, ()) } /// Set the eventfd that will be signaled by the guest when buffers are /// available for the host to process. /// /// # Arguments /// * `queue_index` - Index of the queue to modify. /// * `fd` - EventFd that will be signaled from guest. fn set_vring_kick(&self, queue_index: usize, fd: &EventFd) -> Result<()> { let vring_file = vhost_vring_file { index: queue_index as u32, fd: fd.as_raw_fd(), }; // SAFETY: This ioctl is called on a valid vhost fd and has its return value checked. let ret = unsafe { ioctl_with_ref(self, VHOST_SET_VRING_KICK(), &vring_file) }; ioctl_result(ret, ()) } /// Set the eventfd to signal an error from the vhost backend. /// /// # Arguments /// * `queue_index` - Index of the queue to modify. /// * `fd` - EventFd that will be signaled from the backend. fn set_vring_err(&self, queue_index: usize, fd: &EventFd) -> Result<()> { let vring_file = vhost_vring_file { index: queue_index as u32, fd: fd.as_raw_fd(), }; // SAFETY: This ioctl is called on a valid vhost fd and has its return value checked. 
let ret = unsafe { ioctl_with_ref(self, VHOST_SET_VRING_ERR(), &vring_file) }; ioctl_result(ret, ()) } } /// Interface to handle in-kernel backend features. pub trait VhostKernFeatures: Sized + AsRawFd { /// Get features acked with the vhost backend. fn get_backend_features_acked(&self) -> u64; /// Set features acked with the vhost backend. fn set_backend_features_acked(&mut self, features: u64); /// Get a bitmask of supported vhost backend features. fn get_backend_features(&self) -> Result { let mut avail_features: u64 = 0; let ret = // SAFETY: This ioctl is called on a valid vhost fd and has its return value checked. unsafe { ioctl_with_mut_ref(self, VHOST_GET_BACKEND_FEATURES(), &mut avail_features) }; ioctl_result(ret, avail_features) } /// Inform the vhost subsystem which backend features to enable. This should /// be a subset of supported features from VHOST_GET_BACKEND_FEATURES. /// /// # Arguments /// * `features` - Bitmask of features to set. fn set_backend_features(&mut self, features: u64) -> Result<()> { // SAFETY: This ioctl is called on a valid vhost fd and has its return value checked. let ret = unsafe { ioctl_with_ref(self, VHOST_SET_BACKEND_FEATURES(), &features) }; if ret >= 0 { self.set_backend_features_acked(features); } ioctl_result(ret, ()) } } /// Handle IOTLB messeges for in-kernel vhost device backend. impl VhostIotlbBackend for I { /// Send an IOTLB message to the in-kernel vhost backend. /// /// # Arguments /// * `msg` - IOTLB message to send. 
fn send_iotlb_msg(&self, msg: &VhostIotlbMsg) -> Result<()> { let ret: ssize_t; if self.get_backend_features_acked() & (1 << VHOST_BACKEND_F_IOTLB_MSG_V2) != 0 { let mut msg_v2 = vhost_msg_v2 { type_: VHOST_IOTLB_MSG_V2, ..Default::default() }; msg_v2.__bindgen_anon_1.iotlb.iova = msg.iova; msg_v2.__bindgen_anon_1.iotlb.size = msg.size; msg_v2.__bindgen_anon_1.iotlb.uaddr = msg.userspace_addr; msg_v2.__bindgen_anon_1.iotlb.perm = msg.perm as u8; msg_v2.__bindgen_anon_1.iotlb.type_ = msg.msg_type as u8; // SAFETY: This is safe because we are using a valid vhost fd, and // a valid pointer and size to the vhost_msg_v2 structure. ret = unsafe { write( self.as_raw_fd(), &msg_v2 as *const vhost_msg_v2 as *const c_void, mem::size_of::(), ) }; } else { let mut msg_v1 = vhost_msg { type_: VHOST_IOTLB_MSG, ..Default::default() }; msg_v1.__bindgen_anon_1.iotlb.iova = msg.iova; msg_v1.__bindgen_anon_1.iotlb.size = msg.size; msg_v1.__bindgen_anon_1.iotlb.uaddr = msg.userspace_addr; msg_v1.__bindgen_anon_1.iotlb.perm = msg.perm as u8; msg_v1.__bindgen_anon_1.iotlb.type_ = msg.msg_type as u8; // SAFETY: This is safe because we are using a valid vhost fd, and // a valid pointer and size to the vhost_msg structure. ret = unsafe { write( self.as_raw_fd(), &msg_v1 as *const vhost_msg as *const c_void, mem::size_of::(), ) }; } io_result(ret, ()) } } impl VhostIotlbMsgParser for vhost_msg { fn parse(&self, msg: &mut VhostIotlbMsg) -> Result<()> { if self.type_ != VHOST_IOTLB_MSG { return Err(Error::InvalidIotlbMsg); } // SAFETY: We trust the kernel to return a structure with the union // fields properly initialized. We are sure it is a vhost_msg, because // we checked that `self.type_` is VHOST_IOTLB_MSG. 
unsafe { if self.__bindgen_anon_1.iotlb.type_ == 0 { return Err(Error::InvalidIotlbMsg); } msg.iova = self.__bindgen_anon_1.iotlb.iova; msg.size = self.__bindgen_anon_1.iotlb.size; msg.userspace_addr = self.__bindgen_anon_1.iotlb.uaddr; msg.perm = mem::transmute(self.__bindgen_anon_1.iotlb.perm); msg.msg_type = mem::transmute(self.__bindgen_anon_1.iotlb.type_); } Ok(()) } } impl VhostIotlbMsgParser for vhost_msg_v2 { fn parse(&self, msg: &mut VhostIotlbMsg) -> Result<()> { if self.type_ != VHOST_IOTLB_MSG_V2 { return Err(Error::InvalidIotlbMsg); } // SAFETY: We trust the kernel to return a structure with the union // fields properly initialized. We are sure it is a vhost_msg_v2, because // we checked that `self.type_` is VHOST_IOTLB_MSG_V2. unsafe { if self.__bindgen_anon_1.iotlb.type_ == 0 { return Err(Error::InvalidIotlbMsg); } msg.iova = self.__bindgen_anon_1.iotlb.iova; msg.size = self.__bindgen_anon_1.iotlb.size; msg.userspace_addr = self.__bindgen_anon_1.iotlb.uaddr; msg.perm = mem::transmute(self.__bindgen_anon_1.iotlb.perm); msg.msg_type = mem::transmute(self.__bindgen_anon_1.iotlb.type_); } Ok(()) } } impl VringConfigData { /// Convert the config (guest address space) into vhost_vring_addr /// (host address space). 
pub fn to_vhost_vring_addr( &self, queue_index: usize, mem: &AS, ) -> Result { let desc_addr = mem .memory() .get_host_address(GuestAddress(self.desc_table_addr)) .map_err(|_| Error::DescriptorTableAddress)?; let avail_addr = mem .memory() .get_host_address(GuestAddress(self.avail_ring_addr)) .map_err(|_| Error::AvailAddress)?; let used_addr = mem .memory() .get_host_address(GuestAddress(self.used_ring_addr)) .map_err(|_| Error::UsedAddress)?; Ok(vhost_vring_addr { index: queue_index as u32, flags: self.flags, desc_user_addr: desc_addr as u64, used_user_addr: used_addr as u64, avail_user_addr: avail_addr as u64, log_guest_addr: self.get_log_addr(), }) } } vhost-0.10.0/src/vhost_kern/net.rs000064400000000000000000000117321046102023000151560ustar 00000000000000// Copyright (C) 2021 Alibaba Cloud Computing. All rights reserved. // SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause //! Kernel-based vhost-net backend use std::fs::{File, OpenOptions}; use std::os::unix::fs::OpenOptionsExt; use std::os::unix::io::{AsRawFd, RawFd}; use vm_memory::GuestAddressSpace; use vmm_sys_util::ioctl::ioctl_with_ref; use super::vhost_binding::*; use super::{ioctl_result, Error, Result, VhostKernBackend}; use crate::net::*; const VHOST_NET_PATH: &str = "/dev/vhost-net"; /// Handle for running VHOST_NET ioctls pub struct Net { fd: File, mem: AS, } impl Net { /// Open a handle to a new VHOST-NET instance. 
pub fn new(mem: AS) -> Result { Ok(Net { fd: OpenOptions::new() .read(true) .write(true) .custom_flags(libc::O_CLOEXEC | libc::O_NONBLOCK) .open(VHOST_NET_PATH) .map_err(Error::VhostOpen)?, mem, }) } } impl VhostNet for Net { fn set_backend(&self, queue_index: usize, fd: Option<&File>) -> Result<()> { let vring_file = vhost_vring_file { index: queue_index as u32, fd: fd.map_or(-1, |v| v.as_raw_fd()), }; // SAFETY: Safe because the vhost-net fd is valid and we check the return value let ret = unsafe { ioctl_with_ref(self, VHOST_NET_SET_BACKEND(), &vring_file) }; ioctl_result(ret, ()) } } impl VhostKernBackend for Net { type AS = AS; fn mem(&self) -> &Self::AS { &self.mem } } impl AsRawFd for Net { fn as_raw_fd(&self) -> RawFd { self.fd.as_raw_fd() } } #[cfg(test)] mod tests { use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap}; use vmm_sys_util::eventfd::EventFd; use super::*; use crate::{ VhostBackend, VhostUserDirtyLogRegion, VhostUserMemoryRegionInfo, VringConfigData, }; use serial_test::serial; #[test] #[serial] fn test_net_new_device() { let m = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10_0000)]).unwrap(); let net = Net::new(&m).unwrap(); assert!(net.as_raw_fd() >= 0); assert!(net.mem().find_region(GuestAddress(0x100)).is_some()); assert!(net.mem().find_region(GuestAddress(0x10_0000)).is_none()); } #[test] #[serial] fn test_net_is_valid() { let m = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10_0000)]).unwrap(); let net = Net::new(&m).unwrap(); let mut config = VringConfigData { queue_max_size: 32, queue_size: 32, flags: 0, desc_table_addr: 0x1000, used_ring_addr: 0x2000, avail_ring_addr: 0x3000, log_addr: None, }; assert!(net.is_valid(&config)); config.queue_size = 0; assert!(!net.is_valid(&config)); config.queue_size = 31; assert!(!net.is_valid(&config)); config.queue_size = 33; assert!(!net.is_valid(&config)); } #[test] #[serial] fn test_net_ioctls() { let m = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 
0x10_0000)]).unwrap(); let net = Net::new(&m).unwrap(); let backend = OpenOptions::new() .read(true) .write(true) .open("/dev/null") .unwrap(); let features = net.get_features().unwrap(); net.set_features(features).unwrap(); net.set_owner().unwrap(); net.set_mem_table(&[]).unwrap_err(); let region = VhostUserMemoryRegionInfo::new( 0x0, 0x10_0000, m.get_host_address(GuestAddress(0x0)).unwrap() as u64, 0, -1, ); net.set_mem_table(&[region]).unwrap(); net.set_log_base( 0x4000, Some(VhostUserDirtyLogRegion { mmap_size: 0x1000, mmap_offset: 0x10, mmap_handle: 1, }), ) .unwrap_err(); net.set_log_base(0x4000, None).unwrap(); let eventfd = EventFd::new(0).unwrap(); net.set_log_fd(eventfd.as_raw_fd()).unwrap(); net.set_vring_num(0, 32).unwrap(); let config = VringConfigData { queue_max_size: 32, queue_size: 32, flags: 0, desc_table_addr: 0x1000, used_ring_addr: 0x2000, avail_ring_addr: 0x3000, log_addr: None, }; net.set_vring_addr(0, &config).unwrap(); net.set_vring_base(0, 1).unwrap(); net.set_vring_call(0, &eventfd).unwrap(); net.set_vring_kick(0, &eventfd).unwrap(); net.set_vring_err(0, &eventfd).unwrap(); assert_eq!(net.get_vring_base(0).unwrap(), 1); net.set_backend(0, Some(&backend)).unwrap_err(); net.set_backend(0, None).unwrap(); } } vhost-0.10.0/src/vhost_kern/vdpa.rs000064400000000000000000000455211046102023000153250ustar 00000000000000// Copyright (C) 2021 Red Hat, Inc. All rights reserved. // SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause //! Kernel-based vhost-vdpa backend. 
use std::fs::{File, OpenOptions}; use std::io::Error as IOError; use std::os::raw::{c_uchar, c_uint}; use std::os::unix::fs::OpenOptionsExt; use std::os::unix::io::{AsRawFd, RawFd}; use vm_memory::GuestAddressSpace; use vmm_sys_util::eventfd::EventFd; use vmm_sys_util::fam::*; use vmm_sys_util::ioctl::{ioctl, ioctl_with_mut_ref, ioctl_with_ptr, ioctl_with_ref}; use super::vhost_binding::*; use super::{ioctl_result, Error, Result, VhostKernBackend, VhostKernFeatures}; use crate::vdpa::*; use crate::{VhostAccess, VhostIotlbBackend, VhostIotlbMsg, VhostIotlbType, VringConfigData}; // Implement the FamStruct trait for vhost_vdpa_config generate_fam_struct_impl!( vhost_vdpa_config, c_uchar, buf, c_uint, len, c_uint::MAX as usize ); type VhostVdpaConfig = FamStructWrapper; /// Handle for running VHOST_VDPA ioctls. pub struct VhostKernVdpa { fd: File, mem: AS, backend_features_acked: u64, } impl VhostKernVdpa { /// Open a handle to a new VHOST-VDPA instance. pub fn new(path: &str, mem: AS) -> Result { Ok(VhostKernVdpa { fd: OpenOptions::new() .read(true) .write(true) .custom_flags(libc::O_CLOEXEC | libc::O_NONBLOCK) .open(path) .map_err(Error::VhostOpen)?, mem, backend_features_acked: 0, }) } /// Create a `VhostKernVdpa` object with given content. pub fn with(fd: File, mem: AS, backend_features_acked: u64) -> Self { VhostKernVdpa { fd, mem, backend_features_acked, } } /// Set the addresses for a given vring. /// /// # Arguments /// * `queue_index` - Index of the queue to set addresses for. /// * `config_data` - Vring config data, addresses of desc_table, avail_ring /// and used_ring are in the guest address space. pub fn set_vring_addr(&self, queue_index: usize, config_data: &VringConfigData) -> Result<()> { if !self.is_valid(config_data) { return Err(Error::InvalidQueue); } // vDPA backends expect IOVA (that can be mapped 1:1 with // GPA when no IOMMU is involved). 
let vring_addr = vhost_vring_addr { index: queue_index as u32, flags: config_data.flags, desc_user_addr: config_data.desc_table_addr, used_user_addr: config_data.used_ring_addr, avail_user_addr: config_data.avail_ring_addr, log_guest_addr: config_data.get_log_addr(), }; // SAFETY: This ioctl is called on a valid vhost-vdpa fd and has its // return value checked. let ret = unsafe { ioctl_with_ref(self, VHOST_SET_VRING_ADDR(), &vring_addr) }; ioctl_result(ret, ()) } } impl VhostVdpa for VhostKernVdpa { fn get_device_id(&self) -> Result { let mut device_id: u32 = 0; // SAFETY: This ioctl is called on a valid vhost-vdpa fd and has its // return value checked. let ret = unsafe { ioctl_with_mut_ref(self, VHOST_VDPA_GET_DEVICE_ID(), &mut device_id) }; ioctl_result(ret, device_id) } fn get_status(&self) -> Result { let mut status: u8 = 0; // SAFETY: This ioctl is called on a valid vhost-vdpa fd and has its // return value checked. let ret = unsafe { ioctl_with_mut_ref(self, VHOST_VDPA_GET_STATUS(), &mut status) }; ioctl_result(ret, status) } fn set_status(&self, status: u8) -> Result<()> { // SAFETY: This ioctl is called on a valid vhost-vdpa fd and has its // return value checked. let ret = unsafe { ioctl_with_ref(self, VHOST_VDPA_SET_STATUS(), &status) }; ioctl_result(ret, ()) } fn get_config(&self, offset: u32, buffer: &mut [u8]) -> Result<()> { let mut config = VhostVdpaConfig::new(buffer.len()) .map_err(|_| Error::IoctlError(IOError::from_raw_os_error(libc::ENOMEM)))?; // SAFETY: We are not modifying the `len` field of the vhost-vdpa fam-struct unsafe { config.as_mut_fam_struct().off = offset; } // SAFETY: This ioctl is called on a valid vhost-vdpa fd and has its // return value checked. 
let ret = unsafe { ioctl_with_ptr( self, VHOST_VDPA_GET_CONFIG(), config.as_mut_fam_struct_ptr(), ) }; buffer.copy_from_slice(config.as_slice()); ioctl_result(ret, ()) } fn set_config(&self, offset: u32, buffer: &[u8]) -> Result<()> { let mut config = VhostVdpaConfig::new(buffer.len()) .map_err(|_| Error::IoctlError(IOError::from_raw_os_error(libc::ENOMEM)))?; // SAFETY: We are not modifying the `len` field of the vhost-vdpa fam-struct unsafe { config.as_mut_fam_struct().off = offset; } config.as_mut_slice().copy_from_slice(buffer); let ret = // SAFETY: This ioctl is called on a valid vhost-vdpa fd and has its // return value checked. unsafe { ioctl_with_ptr(self, VHOST_VDPA_SET_CONFIG(), config.as_fam_struct_ptr()) }; ioctl_result(ret, ()) } fn set_vring_enable(&self, queue_index: usize, enabled: bool) -> Result<()> { let vring_state = vhost_vring_state { index: queue_index as u32, num: enabled as u32, }; // SAFETY: This ioctl is called on a valid vhost-vdpa fd and has its // return value checked. let ret = unsafe { ioctl_with_ref(self, VHOST_VDPA_SET_VRING_ENABLE(), &vring_state) }; ioctl_result(ret, ()) } fn get_vring_num(&self) -> Result { let mut vring_num: u16 = 0; // SAFETY: This ioctl is called on a valid vhost-vdpa fd and has its // return value checked. let ret = unsafe { ioctl_with_mut_ref(self, VHOST_VDPA_GET_VRING_NUM(), &mut vring_num) }; ioctl_result(ret, vring_num) } fn set_config_call(&self, fd: &EventFd) -> Result<()> { let event_fd: ::std::os::raw::c_int = fd.as_raw_fd(); // SAFETY: This ioctl is called on a valid vhost-vdpa fd and has its // return value checked. let ret = unsafe { ioctl_with_ref(self, VHOST_VDPA_SET_CONFIG_CALL(), &event_fd) }; ioctl_result(ret, ()) } fn get_iova_range(&self) -> Result { let mut low_iova_range = vhost_vdpa_iova_range { first: 0, last: 0 }; let ret = // SAFETY: This ioctl is called on a valid vhost-vdpa fd and has its // return value checked. 
unsafe { ioctl_with_mut_ref(self, VHOST_VDPA_GET_IOVA_RANGE(), &mut low_iova_range) }; let iova_range = VhostVdpaIovaRange { first: low_iova_range.first, last: low_iova_range.last, }; ioctl_result(ret, iova_range) } fn get_config_size(&self) -> Result { let mut config_size: u32 = 0; let ret = // SAFETY: This ioctl is called on a valid vhost-vdpa fd and has its // return value checked. unsafe { ioctl_with_mut_ref(self, VHOST_VDPA_GET_CONFIG_SIZE(), &mut config_size) }; ioctl_result(ret, config_size) } fn get_vqs_count(&self) -> Result { let mut vqs_count: u32 = 0; // SAFETY: This ioctl is called on a valid vhost-vdpa fd and has its // return value checked. let ret = unsafe { ioctl_with_mut_ref(self, VHOST_VDPA_GET_VQS_COUNT(), &mut vqs_count) }; ioctl_result(ret, vqs_count) } fn get_group_num(&self) -> Result { let mut group_num: u32 = 0; // SAFETY: This ioctl is called on a valid vhost-vdpa fd and has its // return value checked. let ret = unsafe { ioctl_with_mut_ref(self, VHOST_VDPA_GET_GROUP_NUM(), &mut group_num) }; ioctl_result(ret, group_num) } fn get_as_num(&self) -> Result { let mut as_num: u32 = 0; // SAFETY: This ioctl is called on a valid vhost-vdpa fd and has its // return value checked. let ret = unsafe { ioctl_with_mut_ref(self, VHOST_VDPA_GET_AS_NUM(), &mut as_num) }; ioctl_result(ret, as_num) } fn get_vring_group(&self, queue_index: u32) -> Result { let mut vring_state = vhost_vring_state { index: queue_index, ..Default::default() }; let ret = // SAFETY: This ioctl is called on a valid vhost-vdpa fd and has its // return value checked. unsafe { ioctl_with_mut_ref(self, VHOST_VDPA_GET_VRING_GROUP(), &mut vring_state) }; ioctl_result(ret, vring_state.num) } fn set_group_asid(&self, group_index: u32, asid: u32) -> Result<()> { let vring_state = vhost_vring_state { index: group_index, num: asid, }; // SAFETY: This ioctl is called on a valid vhost-vdpa fd and has its // return value checked. 
let ret = unsafe { ioctl_with_ref(self, VHOST_VDPA_GET_VRING_GROUP(), &vring_state) }; ioctl_result(ret, ()) } fn suspend(&self) -> Result<()> { // SAFETY: This ioctl is called on a valid vhost-vdpa fd and has its // return value checked. let ret = unsafe { ioctl(self, VHOST_VDPA_SUSPEND()) }; ioctl_result(ret, ()) } fn dma_map(&self, iova: u64, size: u64, vaddr: *const u8, readonly: bool) -> Result<()> { let iotlb = VhostIotlbMsg { iova, size, userspace_addr: vaddr as u64, perm: match readonly { true => VhostAccess::ReadOnly, false => VhostAccess::ReadWrite, }, msg_type: VhostIotlbType::Update, }; self.send_iotlb_msg(&iotlb) } fn dma_unmap(&self, iova: u64, size: u64) -> Result<()> { let iotlb = VhostIotlbMsg { iova, size, msg_type: VhostIotlbType::Invalidate, ..Default::default() }; self.send_iotlb_msg(&iotlb) } } impl VhostKernBackend for VhostKernVdpa { type AS = AS; fn mem(&self) -> &Self::AS { &self.mem } /// Check whether the ring configuration is valid. fn is_valid(&self, config_data: &VringConfigData) -> bool { let queue_size = config_data.queue_size; if queue_size > config_data.queue_max_size || queue_size == 0 || (queue_size & (queue_size - 1)) != 0 { return false; } // Since vDPA could be dealing with IOVAs corresponding to GVAs, it // wouldn't make sense to go through the validation of the descriptor // table address, available ring address and used ring address against // the guest memory representation we have access to. 
config_data.is_log_addr_valid() } } impl AsRawFd for VhostKernVdpa { fn as_raw_fd(&self) -> RawFd { self.fd.as_raw_fd() } } impl VhostKernFeatures for VhostKernVdpa { fn get_backend_features_acked(&self) -> u64 { self.backend_features_acked } fn set_backend_features_acked(&mut self, features: u64) { self.backend_features_acked = features; } } #[cfg(test)] mod tests { const VHOST_VDPA_PATH: &str = "/dev/vhost-vdpa-0"; use std::alloc::{alloc, dealloc, Layout}; use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap}; use vmm_sys_util::eventfd::EventFd; use super::*; use crate::{ VhostBackend, VhostUserDirtyLogRegion, VhostUserMemoryRegionInfo, VringConfigData, }; use serial_test::serial; use std::io::ErrorKind; /// macro to skip test if vhost-vdpa device path is not found. /// /// vDPA simulators are available since Linux 5.7, but the CI may have /// an older kernel, so for now we skip the test if we don't find /// the device. macro_rules! unwrap_not_found { ( $e:expr ) => { match $e { Ok(v) => v, Err(error) => match error { Error::VhostOpen(ref e) if e.kind() == ErrorKind::NotFound => { println!("Err: {:?} SKIPPED", e); return; } e => panic!("Err: {:?}", e), }, } }; } macro_rules! 
validate_ioctl { ( $e:expr, $ref_value:expr ) => { match $e { Ok(v) => assert_eq!(v, $ref_value), Err(error) => match error { Error::IoctlError(e) if e.raw_os_error().unwrap() == libc::ENOTTY => { println!("Err: {:?} SKIPPED", e); } e => panic!("Err: {:?}", e), }, } }; } #[test] #[serial] fn test_vdpa_kern_new_device() { let m = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10_0000)]).unwrap(); let vdpa = unwrap_not_found!(VhostKernVdpa::new(VHOST_VDPA_PATH, &m)); assert!(vdpa.as_raw_fd() >= 0); assert!(vdpa.mem().find_region(GuestAddress(0x100)).is_some()); assert!(vdpa.mem().find_region(GuestAddress(0x10_0000)).is_none()); } #[test] #[serial] fn test_vdpa_kern_is_valid() { let m = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10_0000)]).unwrap(); let vdpa = unwrap_not_found!(VhostKernVdpa::new(VHOST_VDPA_PATH, &m)); let mut config = VringConfigData { queue_max_size: 32, queue_size: 32, flags: 0, desc_table_addr: 0x1000, used_ring_addr: 0x2000, avail_ring_addr: 0x3000, log_addr: None, }; assert!(vdpa.is_valid(&config)); config.queue_size = 0; assert!(!vdpa.is_valid(&config)); config.queue_size = 31; assert!(!vdpa.is_valid(&config)); config.queue_size = 33; assert!(!vdpa.is_valid(&config)); } #[test] #[serial] fn test_vdpa_kern_ioctls() { let m = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10_0000)]).unwrap(); let vdpa = unwrap_not_found!(VhostKernVdpa::new(VHOST_VDPA_PATH, &m)); let features = vdpa.get_features().unwrap(); // VIRTIO_F_VERSION_1 (bit 32) should be set assert_ne!(features & (1 << 32), 0); vdpa.set_features(features).unwrap(); vdpa.set_owner().unwrap(); vdpa.set_mem_table(&[]).unwrap_err(); let region = VhostUserMemoryRegionInfo::new( 0x0, 0x10_0000, m.get_host_address(GuestAddress(0x0)).unwrap() as u64, 0, -1, ); vdpa.set_mem_table(&[region]).unwrap(); let device_id = vdpa.get_device_id().unwrap(); assert!(device_id > 0); assert_eq!(vdpa.get_status().unwrap(), 0x0); vdpa.set_status(0x1).unwrap(); 
assert_eq!(vdpa.get_status().unwrap(), 0x1); let mut vec = vec![0u8; 8]; vdpa.get_config(0, &mut vec).unwrap(); vdpa.set_config(0, &vec).unwrap(); let eventfd = EventFd::new(0).unwrap(); // set_log_base() and set_log_fd() are not supported by vhost-vdpa vdpa.set_log_base( 0x4000, Some(VhostUserDirtyLogRegion { mmap_size: 0x1000, mmap_offset: 0x10, mmap_handle: 1, }), ) .unwrap_err(); vdpa.set_log_base(0x4000, None).unwrap_err(); vdpa.set_log_fd(eventfd.as_raw_fd()).unwrap_err(); let max_queues = vdpa.get_vring_num().unwrap(); vdpa.set_vring_num(0, max_queues + 1).unwrap_err(); vdpa.set_vring_num(0, 32).unwrap(); let config = VringConfigData { queue_max_size: 32, queue_size: 32, flags: 0, desc_table_addr: 0x1000, used_ring_addr: 0x2000, avail_ring_addr: 0x3000, log_addr: None, }; vdpa.set_vring_addr(0, &config).unwrap(); vdpa.set_vring_base(0, 1).unwrap(); vdpa.set_vring_call(0, &eventfd).unwrap(); vdpa.set_vring_kick(0, &eventfd).unwrap(); vdpa.set_vring_err(0, &eventfd).unwrap(); vdpa.set_config_call(&eventfd).unwrap(); let iova_range = vdpa.get_iova_range().unwrap(); // vDPA-block simulator returns [0, u64::MAX] range assert_eq!(iova_range.first, 0); assert_eq!(iova_range.last, u64::MAX); let (config_size, vqs_count, group_num, as_num, vring_group) = if device_id == 1 { (24, 3, 2, 2, 0) } else if device_id == 2 { (60, 1, 1, 1, 0) } else { panic!("Unexpected device id {}", device_id) }; validate_ioctl!(vdpa.get_config_size(), config_size); validate_ioctl!(vdpa.get_vqs_count(), vqs_count); validate_ioctl!(vdpa.get_group_num(), group_num); validate_ioctl!(vdpa.get_as_num(), as_num); validate_ioctl!(vdpa.get_vring_group(0), vring_group); validate_ioctl!(vdpa.set_group_asid(0, 12345), ()); if vdpa.get_backend_features().unwrap() & (1 << VHOST_BACKEND_F_SUSPEND) == (1 << VHOST_BACKEND_F_SUSPEND) { validate_ioctl!(vdpa.suspend(), ()); } assert_eq!(vdpa.get_vring_base(0).unwrap(), 1); vdpa.set_vring_enable(0, true).unwrap(); vdpa.set_vring_enable(0, false).unwrap(); } 
#[test] #[serial] fn test_vdpa_kern_dma() { let m = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10_0000)]).unwrap(); let mut vdpa = unwrap_not_found!(VhostKernVdpa::new(VHOST_VDPA_PATH, &m)); let features = vdpa.get_features().unwrap(); // VIRTIO_F_VERSION_1 (bit 32) should be set assert_ne!(features & (1 << 32), 0); vdpa.set_features(features).unwrap(); let backend_features = vdpa.get_backend_features().unwrap(); assert_ne!(backend_features & (1 << VHOST_BACKEND_F_IOTLB_MSG_V2), 0); vdpa.set_backend_features(backend_features).unwrap(); vdpa.set_owner().unwrap(); vdpa.dma_map(0xFFFF_0000, 0xFFFF, std::ptr::null::(), false) .unwrap_err(); let layout = Layout::from_size_align(0xFFFF, 1).unwrap(); // SAFETY: Safe because layout has non-zero size. let ptr = unsafe { alloc(layout) }; vdpa.dma_map(0xFFFF_0000, 0xFFFF, ptr, false).unwrap(); vdpa.dma_unmap(0xFFFF_0000, 0xFFFF).unwrap(); // SAFETY: Safe because `ptr` is allocated with the same allocator // using the same `layout`. unsafe { dealloc(ptr, layout) }; } } vhost-0.10.0/src/vhost_kern/vhost_binding.rs000064400000000000000000000420431046102023000172240ustar 00000000000000// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved. // SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause // // Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Portions Copyright 2017 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD-Google file. 
/* Auto-generated by bindgen then manually edited for simplicity */ #![allow(non_upper_case_globals)] #![allow(non_camel_case_types)] #![allow(non_snake_case)] #![allow(missing_docs)] #![allow(clippy::missing_safety_doc)] use crate::{Error, Result}; use std::os::raw; pub const VHOST: raw::c_uint = 0xaf; pub const VHOST_VRING_F_LOG: raw::c_uint = 0; pub const VHOST_ACCESS_RO: raw::c_uchar = 1; pub const VHOST_ACCESS_WO: raw::c_uchar = 2; pub const VHOST_ACCESS_RW: raw::c_uchar = 3; pub const VHOST_IOTLB_MISS: raw::c_uchar = 1; pub const VHOST_IOTLB_UPDATE: raw::c_uchar = 2; pub const VHOST_IOTLB_INVALIDATE: raw::c_uchar = 3; pub const VHOST_IOTLB_ACCESS_FAIL: raw::c_uchar = 4; pub const VHOST_IOTLB_BATCH_BEGIN: raw::c_uchar = 5; pub const VHOST_IOTLB_BATCH_END: raw::c_uchar = 6; pub const VHOST_IOTLB_MSG: raw::c_int = 1; pub const VHOST_IOTLB_MSG_V2: raw::c_uint = 2; pub const VHOST_PAGE_SIZE: raw::c_uint = 4096; pub const VHOST_VIRTIO: raw::c_uint = 175; pub const VHOST_VRING_LITTLE_ENDIAN: raw::c_uint = 0; pub const VHOST_VRING_BIG_ENDIAN: raw::c_uint = 1; pub const VHOST_F_LOG_ALL: raw::c_uint = 26; pub const VHOST_NET_F_VIRTIO_NET_HDR: raw::c_uint = 27; pub const VHOST_SCSI_ABI_VERSION: raw::c_uint = 1; pub const VHOST_BACKEND_F_IOTLB_MSG_V2: raw::c_ulonglong = 0x1; pub const VHOST_BACKEND_F_IOTLB_BATCH: raw::c_ulonglong = 0x2; pub const VHOST_BACKEND_F_IOTLB_ASID: raw::c_ulonglong = 0x3; pub const VHOST_BACKEND_F_SUSPEND: raw::c_ulonglong = 0x4; ioctl_ior_nr!(VHOST_GET_FEATURES, VHOST, 0x00, raw::c_ulonglong); ioctl_iow_nr!(VHOST_SET_FEATURES, VHOST, 0x00, raw::c_ulonglong); ioctl_io_nr!(VHOST_SET_OWNER, VHOST, 0x01); ioctl_io_nr!(VHOST_RESET_OWNER, VHOST, 0x02); ioctl_iow_nr!(VHOST_SET_MEM_TABLE, VHOST, 0x03, vhost_memory); ioctl_iow_nr!(VHOST_SET_LOG_BASE, VHOST, 0x04, raw::c_ulonglong); ioctl_iow_nr!(VHOST_SET_LOG_FD, VHOST, 0x07, raw::c_int); ioctl_iow_nr!(VHOST_SET_VRING_NUM, VHOST, 0x10, vhost_vring_state); ioctl_iow_nr!(VHOST_SET_VRING_ADDR, VHOST, 0x11, 
vhost_vring_addr); ioctl_iow_nr!(VHOST_SET_VRING_BASE, VHOST, 0x12, vhost_vring_state); ioctl_iowr_nr!(VHOST_GET_VRING_BASE, VHOST, 0x12, vhost_vring_state); ioctl_iow_nr!(VHOST_SET_VRING_KICK, VHOST, 0x20, vhost_vring_file); ioctl_iow_nr!(VHOST_SET_VRING_CALL, VHOST, 0x21, vhost_vring_file); ioctl_iow_nr!(VHOST_SET_VRING_ERR, VHOST, 0x22, vhost_vring_file); ioctl_iow_nr!(VHOST_SET_BACKEND_FEATURES, VHOST, 0x25, raw::c_ulonglong); ioctl_ior_nr!(VHOST_GET_BACKEND_FEATURES, VHOST, 0x26, raw::c_ulonglong); ioctl_iow_nr!(VHOST_NET_SET_BACKEND, VHOST, 0x30, vhost_vring_file); ioctl_iow_nr!(VHOST_SCSI_SET_ENDPOINT, VHOST, 0x40, vhost_scsi_target); ioctl_iow_nr!(VHOST_SCSI_CLEAR_ENDPOINT, VHOST, 0x41, vhost_scsi_target); ioctl_iow_nr!(VHOST_SCSI_GET_ABI_VERSION, VHOST, 0x42, raw::c_int); ioctl_iow_nr!(VHOST_SCSI_SET_EVENTS_MISSED, VHOST, 0x43, raw::c_uint); ioctl_iow_nr!(VHOST_SCSI_GET_EVENTS_MISSED, VHOST, 0x44, raw::c_uint); ioctl_iow_nr!(VHOST_VSOCK_SET_GUEST_CID, VHOST, 0x60, raw::c_ulonglong); ioctl_iow_nr!(VHOST_VSOCK_SET_RUNNING, VHOST, 0x61, raw::c_int); ioctl_ior_nr!(VHOST_VDPA_GET_DEVICE_ID, VHOST, 0x70, raw::c_uint); ioctl_ior_nr!(VHOST_VDPA_GET_STATUS, VHOST, 0x71, raw::c_uchar); ioctl_iow_nr!(VHOST_VDPA_SET_STATUS, VHOST, 0x72, raw::c_uchar); ioctl_ior_nr!(VHOST_VDPA_GET_CONFIG, VHOST, 0x73, vhost_vdpa_config); ioctl_iow_nr!(VHOST_VDPA_SET_CONFIG, VHOST, 0x74, vhost_vdpa_config); ioctl_iow_nr!(VHOST_VDPA_SET_VRING_ENABLE, VHOST, 0x75, vhost_vring_state); ioctl_ior_nr!(VHOST_VDPA_GET_VRING_NUM, VHOST, 0x76, raw::c_ushort); ioctl_iow_nr!(VHOST_VDPA_SET_CONFIG_CALL, VHOST, 0x77, raw::c_int); ioctl_ior_nr!( VHOST_VDPA_GET_IOVA_RANGE, VHOST, 0x78, vhost_vdpa_iova_range ); ioctl_ior_nr!(VHOST_VDPA_GET_CONFIG_SIZE, VHOST, 0x79, raw::c_uint); ioctl_ior_nr!(VHOST_VDPA_GET_VQS_COUNT, VHOST, 0x80, raw::c_uint); ioctl_ior_nr!(VHOST_VDPA_GET_GROUP_NUM, VHOST, 0x81, raw::c_uint); ioctl_ior_nr!(VHOST_VDPA_GET_AS_NUM, VHOST, 0x7a, raw::c_uint); 
ioctl_iowr_nr!(VHOST_VDPA_GET_VRING_GROUP, VHOST, 0x7b, vhost_vring_state); ioctl_iow_nr!(VHOST_VDPA_SET_GROUP_ASID, VHOST, 0x7c, vhost_vring_state); ioctl_io_nr!(VHOST_VDPA_SUSPEND, VHOST, 0x7d); #[repr(C)] #[derive(Default)] pub struct __IncompleteArrayField(::std::marker::PhantomData); impl __IncompleteArrayField { #[inline] pub fn new() -> Self { __IncompleteArrayField(::std::marker::PhantomData) } #[inline] #[allow(clippy::trivially_copy_pass_by_ref)] #[allow(clippy::useless_transmute)] pub unsafe fn as_ptr(&self) -> *const T { ::std::mem::transmute(self) } #[inline] #[allow(clippy::useless_transmute)] pub unsafe fn as_mut_ptr(&mut self) -> *mut T { ::std::mem::transmute(self) } #[inline] pub unsafe fn as_slice(&self, len: usize) -> &[T] { ::std::slice::from_raw_parts(self.as_ptr(), len) } #[inline] pub unsafe fn as_mut_slice(&mut self, len: usize) -> &mut [T] { ::std::slice::from_raw_parts_mut(self.as_mut_ptr(), len) } } impl ::std::fmt::Debug for __IncompleteArrayField { fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { fmt.write_str("__IncompleteArrayField") } } impl ::std::clone::Clone for __IncompleteArrayField { #[inline] fn clone(&self) -> Self { *self } } impl ::std::marker::Copy for __IncompleteArrayField {} #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct vhost_vring_state { pub index: raw::c_uint, pub num: raw::c_uint, } #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct vhost_vring_file { pub index: raw::c_uint, pub fd: raw::c_int, } #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct vhost_vring_addr { pub index: raw::c_uint, pub flags: raw::c_uint, pub desc_user_addr: raw::c_ulonglong, pub used_user_addr: raw::c_ulonglong, pub avail_user_addr: raw::c_ulonglong, pub log_guest_addr: raw::c_ulonglong, } #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct vhost_iotlb_msg { pub iova: raw::c_ulonglong, pub size: raw::c_ulonglong, pub uaddr: raw::c_ulonglong, pub perm: raw::c_uchar, pub type_: 
raw::c_uchar, } #[repr(C)] #[derive(Copy, Clone)] pub struct vhost_msg { pub type_: raw::c_int, pub __bindgen_anon_1: vhost_msg__bindgen_ty_1, } impl Default for vhost_msg { fn default() -> Self { // SAFETY: Zeroing all bytes is fine because they represent a valid // value for all members of the structure unsafe { ::std::mem::zeroed() } } } #[repr(C)] #[derive(Copy, Clone)] pub union vhost_msg__bindgen_ty_1 { pub iotlb: vhost_iotlb_msg, pub padding: [raw::c_uchar; 64usize], _bindgen_union_align: [u64; 8usize], } impl Default for vhost_msg__bindgen_ty_1 { fn default() -> Self { // SAFETY: Zeroing all bytes is fine because they represent a valid // value for all members of the structure unsafe { ::std::mem::zeroed() } } } #[repr(C)] #[derive(Copy, Clone)] pub struct vhost_msg_v2 { pub type_: raw::c_uint, pub reserved: raw::c_uint, pub __bindgen_anon_1: vhost_msg_v2__bindgen_ty_1, } impl Default for vhost_msg_v2 { fn default() -> Self { // SAFETY: Zeroing all bytes is fine because they represent a valid // value for all members of the structure unsafe { ::std::mem::zeroed() } } } #[repr(C)] #[derive(Copy, Clone)] pub union vhost_msg_v2__bindgen_ty_1 { pub iotlb: vhost_iotlb_msg, pub padding: [raw::c_uchar; 64usize], _bindgen_union_align: [u64; 8usize], } impl Default for vhost_msg_v2__bindgen_ty_1 { fn default() -> Self { // SAFETY: Zeroing all bytes is fine because they represent a valid // value for all members of the structure unsafe { ::std::mem::zeroed() } } } #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct vhost_memory_region { pub guest_phys_addr: raw::c_ulonglong, pub memory_size: raw::c_ulonglong, pub userspace_addr: raw::c_ulonglong, pub flags_padding: raw::c_ulonglong, } #[repr(C)] #[derive(Debug, Default, Clone)] pub struct vhost_memory { pub nregions: raw::c_uint, pub padding: raw::c_uint, pub regions: __IncompleteArrayField, __force_alignment: [u64; 0], } #[repr(C)] #[derive(Copy, Clone)] pub struct vhost_scsi_target { pub abi_version: 
raw::c_int, pub vhost_wwpn: [raw::c_char; 224usize], pub vhost_tpgt: raw::c_ushort, pub reserved: raw::c_ushort, } impl Default for vhost_scsi_target { fn default() -> Self { // SAFETY: Zeroing all bytes is fine because they represent a valid // value for all members of the structure unsafe { ::std::mem::zeroed() } } } #[repr(C)] #[derive(Debug, Default)] pub struct vhost_vdpa_config { pub off: raw::c_uint, pub len: raw::c_uint, pub buf: __IncompleteArrayField, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct vhost_vdpa_iova_range { pub first: raw::c_ulonglong, pub last: raw::c_ulonglong, } /// Helper to support vhost::set_mem_table() pub struct VhostMemory { buf: Vec, } impl VhostMemory { // Limit number of regions to u16 to simplify error handling pub fn new(entries: u16) -> Self { let size = std::mem::size_of::() * entries as usize; let count = (size + 2 * std::mem::size_of::() - 1) / std::mem::size_of::(); let mut buf: Vec = vec![Default::default(); count]; buf[0].nregions = u32::from(entries); VhostMemory { buf } } pub fn as_ptr(&self) -> *const char { &self.buf[0] as *const vhost_memory as *const char } pub fn get_header(&self) -> &vhost_memory { &self.buf[0] } pub fn get_region(&self, index: u32) -> Option<&vhost_memory_region> { if index >= self.buf[0].nregions { return None; } // SAFETY: Safe because we have allocated enough space nregions let regions = unsafe { self.buf[0].regions.as_slice(self.buf[0].nregions as usize) }; Some(®ions[index as usize]) } pub fn set_region(&mut self, index: u32, region: &vhost_memory_region) -> Result<()> { if index >= self.buf[0].nregions { return Err(Error::InvalidGuestMemory); } // SAFETY: Safe because we have allocated enough space nregions and checked the index. 
let regions = unsafe { self.buf[0].regions.as_mut_slice(index as usize + 1) }; regions[index as usize] = *region; Ok(()) } } #[cfg(test)] mod tests { use super::*; #[test] fn bindgen_test_layout_vhost_vring_state() { assert_eq!( ::std::mem::size_of::(), 8usize, concat!("Size of: ", stringify!(vhost_vring_state)) ); assert_eq!( ::std::mem::align_of::(), 4usize, concat!("Alignment of ", stringify!(vhost_vring_state)) ); } #[test] fn bindgen_test_layout_vhost_vring_file() { assert_eq!( ::std::mem::size_of::(), 8usize, concat!("Size of: ", stringify!(vhost_vring_file)) ); assert_eq!( ::std::mem::align_of::(), 4usize, concat!("Alignment of ", stringify!(vhost_vring_file)) ); } #[test] fn bindgen_test_layout_vhost_vring_addr() { assert_eq!( ::std::mem::size_of::(), 40usize, concat!("Size of: ", stringify!(vhost_vring_addr)) ); assert_eq!( ::std::mem::align_of::(), 8usize, concat!("Alignment of ", stringify!(vhost_vring_addr)) ); } #[test] fn bindgen_test_layout_vhost_msg__bindgen_ty_1() { assert_eq!( ::std::mem::size_of::(), 64usize, concat!("Size of: ", stringify!(vhost_msg__bindgen_ty_1)) ); assert_eq!( ::std::mem::align_of::(), 8usize, concat!("Alignment of ", stringify!(vhost_msg__bindgen_ty_1)) ); } #[test] fn bindgen_test_layout_vhost_msg() { assert_eq!( ::std::mem::size_of::(), 72usize, concat!("Size of: ", stringify!(vhost_msg)) ); assert_eq!( ::std::mem::align_of::(), 8usize, concat!("Alignment of ", stringify!(vhost_msg)) ); } #[test] fn bindgen_test_layout_vhost_msg_v2__bindgen_ty_1() { assert_eq!( ::std::mem::size_of::(), 64usize, concat!("Size of: ", stringify!(vhost_msg_v2__bindgen_ty_1)) ); assert_eq!( ::std::mem::align_of::(), 8usize, concat!("Alignment of ", stringify!(vhost_msg_v2__bindgen_ty_1)) ); } #[test] fn bindgen_test_layout_vhost_msg_v2() { assert_eq!( ::std::mem::size_of::(), 72usize, concat!("Size of: ", stringify!(vhost_msg_v2)) ); assert_eq!( ::std::mem::align_of::(), 8usize, concat!("Alignment of ", stringify!(vhost_msg_v2)) ); } #[test] fn 
bindgen_test_layout_vhost_memory_region() { assert_eq!( ::std::mem::size_of::(), 32usize, concat!("Size of: ", stringify!(vhost_memory_region)) ); assert_eq!( ::std::mem::align_of::(), 8usize, concat!("Alignment of ", stringify!(vhost_memory_region)) ); } #[test] fn bindgen_test_layout_vhost_memory() { assert_eq!( ::std::mem::size_of::(), 8usize, concat!("Size of: ", stringify!(vhost_memory)) ); assert_eq!( ::std::mem::align_of::(), 8usize, concat!("Alignment of ", stringify!(vhost_memory)) ); } #[test] fn bindgen_test_layout_vhost_iotlb_msg() { assert_eq!( ::std::mem::size_of::(), 32usize, concat!("Size of: ", stringify!(vhost_iotlb_msg)) ); assert_eq!( ::std::mem::align_of::(), 8usize, concat!("Alignment of ", stringify!(vhost_iotlb_msg)) ); } #[test] fn bindgen_test_layout_vhost_scsi_target() { assert_eq!( ::std::mem::size_of::(), 232usize, concat!("Size of: ", stringify!(vhost_scsi_target)) ); assert_eq!( ::std::mem::align_of::(), 4usize, concat!("Alignment of ", stringify!(vhost_scsi_target)) ); } #[test] fn bindgen_test_layout_vhost_vdpa_config() { assert_eq!( ::std::mem::size_of::(), 8usize, concat!("Size of: ", stringify!(vhost_vdpa_config)) ); assert_eq!( ::std::mem::align_of::(), 4usize, concat!("Alignment of ", stringify!(vhost_vdpa_config)) ); } #[test] fn bindgen_test_layout_vhost_vdpa_iova_range() { assert_eq!( ::std::mem::size_of::(), 16usize, concat!("Size of: ", stringify!(vhost_vdpa_iova_range)) ); assert_eq!( ::std::mem::align_of::(), 8usize, concat!("Alignment of ", stringify!(vhost_vdpa_iova_range)) ); } #[test] fn test_vhostmemory() { let mut obj = VhostMemory::new(2); let region = vhost_memory_region { guest_phys_addr: 0x1000u64, memory_size: 0x2000u64, userspace_addr: 0x300000u64, flags_padding: 0u64, }; assert!(obj.get_region(2).is_none()); { let header = obj.get_header(); assert_eq!(header.nregions, 2u32); } { assert!(obj.set_region(0, ®ion).is_ok()); assert!(obj.set_region(1, ®ion).is_ok()); assert!(obj.set_region(2, ®ion).is_err()); } 
let region1 = obj.get_region(1).unwrap(); assert_eq!(region1.guest_phys_addr, 0x1000u64); assert_eq!(region1.memory_size, 0x2000u64); assert_eq!(region1.userspace_addr, 0x300000u64); } } vhost-0.10.0/src/vhost_kern/vsock.rs000064400000000000000000000134041046102023000155130ustar 00000000000000// Copyright (C) 2019 Alibaba Cloud. All rights reserved. // SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause // // Copyright 2017 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD-Google file. //! Kernel-based vhost-vsock backend. use std::fs::{File, OpenOptions}; use std::os::unix::fs::OpenOptionsExt; use std::os::unix::io::{AsRawFd, RawFd}; use vm_memory::GuestAddressSpace; use vmm_sys_util::ioctl::ioctl_with_ref; use super::vhost_binding::{VHOST_VSOCK_SET_GUEST_CID, VHOST_VSOCK_SET_RUNNING}; use super::{ioctl_result, Error, Result, VhostKernBackend}; use crate::vsock::VhostVsock; const VHOST_PATH: &str = "/dev/vhost-vsock"; /// Handle for running VHOST_VSOCK ioctls. pub struct Vsock { fd: File, mem: AS, } impl Vsock { /// Open a handle to a new VHOST-VSOCK instance. pub fn new(mem: AS) -> Result { Ok(Vsock { fd: OpenOptions::new() .read(true) .write(true) .custom_flags(libc::O_CLOEXEC | libc::O_NONBLOCK) .open(VHOST_PATH) .map_err(Error::VhostOpen)?, mem, }) } fn set_running(&self, running: bool) -> Result<()> { let on: ::std::os::raw::c_int = if running { 1 } else { 0 }; // SAFETY: This ioctl is called on a valid vhost-vsock fd and has its // return value checked. let ret = unsafe { ioctl_with_ref(&self.fd, VHOST_VSOCK_SET_RUNNING(), &on) }; ioctl_result(ret, ()) } } impl VhostVsock for Vsock { fn set_guest_cid(&self, cid: u64) -> Result<()> { // SAFETY: This ioctl is called on a valid vhost-vsock fd and has its // return value checked. 
let ret = unsafe { ioctl_with_ref(&self.fd, VHOST_VSOCK_SET_GUEST_CID(), &cid) }; ioctl_result(ret, ()) } fn start(&self) -> Result<()> { self.set_running(true) } fn stop(&self) -> Result<()> { self.set_running(false) } } impl VhostKernBackend for Vsock { type AS = AS; fn mem(&self) -> &Self::AS { &self.mem } } impl AsRawFd for Vsock { fn as_raw_fd(&self) -> RawFd { self.fd.as_raw_fd() } } #[cfg(test)] mod tests { use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap}; use vmm_sys_util::eventfd::EventFd; use super::*; use crate::{ VhostBackend, VhostUserDirtyLogRegion, VhostUserMemoryRegionInfo, VringConfigData, }; #[test] fn test_vsock_new_device() { let m = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10_0000)]).unwrap(); let vsock = Vsock::new(&m).unwrap(); assert!(vsock.as_raw_fd() >= 0); assert!(vsock.mem().find_region(GuestAddress(0x100)).is_some()); assert!(vsock.mem().find_region(GuestAddress(0x10_0000)).is_none()); } #[test] fn test_vsock_is_valid() { let m = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10_0000)]).unwrap(); let vsock = Vsock::new(&m).unwrap(); let mut config = VringConfigData { queue_max_size: 32, queue_size: 32, flags: 0, desc_table_addr: 0x1000, used_ring_addr: 0x2000, avail_ring_addr: 0x3000, log_addr: None, }; assert!(vsock.is_valid(&config)); config.queue_size = 0; assert!(!vsock.is_valid(&config)); config.queue_size = 31; assert!(!vsock.is_valid(&config)); config.queue_size = 33; assert!(!vsock.is_valid(&config)); } #[test] fn test_vsock_ioctls() { let m = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10_0000)]).unwrap(); let vsock = Vsock::new(&m).unwrap(); let features = vsock.get_features().unwrap(); vsock.set_features(features).unwrap(); vsock.set_owner().unwrap(); vsock.set_mem_table(&[]).unwrap_err(); /* let region = VhostUserMemoryRegionInfo { guest_phys_addr: 0x0, memory_size: 0x10_0000, userspace_addr: 0, mmap_offset: 0, mmap_handle: -1, }; vsock.set_mem_table(&[region]).unwrap_err(); 
*/ let region = VhostUserMemoryRegionInfo::new( 0x0, 0x10_0000, m.get_host_address(GuestAddress(0x0)).unwrap() as u64, 0, -1, ); vsock.set_mem_table(&[region]).unwrap(); vsock .set_log_base( 0x4000, Some(VhostUserDirtyLogRegion { mmap_size: 0x1000, mmap_offset: 0x10, mmap_handle: 1, }), ) .unwrap_err(); vsock.set_log_base(0x4000, None).unwrap(); let eventfd = EventFd::new(0).unwrap(); vsock.set_log_fd(eventfd.as_raw_fd()).unwrap(); vsock.set_vring_num(0, 32).unwrap(); let config = VringConfigData { queue_max_size: 32, queue_size: 32, flags: 0, desc_table_addr: 0x1000, used_ring_addr: 0x2000, avail_ring_addr: 0x3000, log_addr: None, }; vsock.set_vring_addr(0, &config).unwrap(); vsock.set_vring_base(0, 1).unwrap(); vsock.set_vring_call(0, &eventfd).unwrap(); vsock.set_vring_kick(0, &eventfd).unwrap(); vsock.set_vring_err(0, &eventfd).unwrap(); assert_eq!(vsock.get_vring_base(0).unwrap(), 1); vsock.set_guest_cid(0xdead).unwrap(); //vsock.start().unwrap(); //vsock.stop().unwrap(); } } vhost-0.10.0/src/vhost_user/backend.rs000064400000000000000000000060121046102023000157710ustar 00000000000000// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved. // SPDX-License-Identifier: Apache-2.0 //! Traits and Structs for vhost-user backend. use std::sync::Arc; use super::connection::{Endpoint, Listener}; use super::message::*; use super::{BackendReqHandler, Result, VhostUserBackendReqHandler}; /// Vhost-user backend side connection listener. pub struct BackendListener { listener: Listener, backend: Option>, } /// Sets up a listener for incoming frontend connections, and handles construction /// of a Backend on success. impl BackendListener { /// Create a unix domain socket for incoming frontend connections. 
pub fn new(listener: Listener, backend: Arc) -> Result { Ok(BackendListener { listener, backend: Some(backend), }) } /// Accept an incoming connection from the frontend, returning Some(Backend) on /// success, or None if the socket is nonblocking and no incoming connection /// was detected pub fn accept(&mut self) -> Result>> { if let Some(fd) = self.listener.accept()? { return Ok(Some(BackendReqHandler::new( Endpoint::::from_stream(fd), self.backend.take().unwrap(), ))); } Ok(None) } /// Change blocking status on the listener. pub fn set_nonblocking(&self, block: bool) -> Result<()> { self.listener.set_nonblocking(block) } } #[cfg(test)] mod tests { use std::sync::Mutex; use super::*; use crate::vhost_user::dummy_backend::DummyBackendReqHandler; #[test] fn test_backend_listener_set_nonblocking() { let backend = Arc::new(Mutex::new(DummyBackendReqHandler::new())); let listener = Listener::new("/tmp/vhost_user_lib_unit_test_backend_nonblocking", true).unwrap(); let backend_listener = BackendListener::new(listener, backend).unwrap(); backend_listener.set_nonblocking(true).unwrap(); backend_listener.set_nonblocking(false).unwrap(); backend_listener.set_nonblocking(false).unwrap(); backend_listener.set_nonblocking(true).unwrap(); backend_listener.set_nonblocking(true).unwrap(); } #[cfg(feature = "vhost-user-frontend")] #[test] fn test_backend_listener_accept() { use super::super::Frontend; let path = "/tmp/vhost_user_lib_unit_test_backend_accept"; let backend = Arc::new(Mutex::new(DummyBackendReqHandler::new())); let listener = Listener::new(path, true).unwrap(); let mut backend_listener = BackendListener::new(listener, backend).unwrap(); backend_listener.set_nonblocking(true).unwrap(); assert!(backend_listener.accept().unwrap().is_none()); assert!(backend_listener.accept().unwrap().is_none()); let _frontend = Frontend::connect(path, 1).unwrap(); let _backend = backend_listener.accept().unwrap().unwrap(); } } 
vhost-0.10.0/src/vhost_user/backend_req.rs000064400000000000000000000152421046102023000166450ustar 00000000000000// Copyright (C) 2020 Alibaba Cloud. All rights reserved. // SPDX-License-Identifier: Apache-2.0 use std::io; use std::mem; use std::os::unix::io::{AsRawFd, RawFd}; use std::os::unix::net::UnixStream; use std::sync::{Arc, Mutex, MutexGuard}; use super::connection::Endpoint; use super::message::*; use super::{Error, HandlerResult, Result, VhostUserFrontendReqHandler}; use vm_memory::ByteValued; struct BackendInternal { sock: Endpoint, // Protocol feature VHOST_USER_PROTOCOL_F_REPLY_ACK has been negotiated. reply_ack_negotiated: bool, // whether the endpoint has encountered any failure error: Option, } impl BackendInternal { fn check_state(&self) -> Result { match self.error { Some(e) => Err(Error::SocketBroken(std::io::Error::from_raw_os_error(e))), None => Ok(0), } } fn send_message( &mut self, request: BackendReq, body: &T, fds: Option<&[RawFd]>, ) -> Result { self.check_state()?; let len = mem::size_of::(); let mut hdr = VhostUserMsgHeader::new(request, 0, len as u32); if self.reply_ack_negotiated { hdr.set_need_reply(true); } self.sock.send_message(&hdr, body, fds)?; self.wait_for_ack(&hdr) } fn wait_for_ack(&mut self, hdr: &VhostUserMsgHeader) -> Result { self.check_state()?; if !self.reply_ack_negotiated { return Ok(0); } let (reply, body, rfds) = self.sock.recv_body::()?; if !reply.is_reply_for(hdr) || rfds.is_some() || !body.is_valid() { return Err(Error::InvalidMessage); } if body.value != 0 { return Err(Error::FrontendInternalError); } Ok(body.value) } } /// Request proxy to send vhost-user backend requests to the frontend through the backend /// communication channel. /// /// The [Backend] acts as a message proxy to forward vhost-user backend requests to the /// frontend through the vhost-user backend communication channel. The forwarded messages will be /// handled by the [FrontendReqHandler] server. 
/// /// [Backend]: struct.Backend.html /// [FrontendReqHandler]: struct.FrontendReqHandler.html #[derive(Clone)] pub struct Backend { // underlying Unix domain socket for communication node: Arc>, } impl Backend { fn new(ep: Endpoint) -> Self { Backend { node: Arc::new(Mutex::new(BackendInternal { sock: ep, reply_ack_negotiated: false, error: None, })), } } fn node(&self) -> MutexGuard { self.node.lock().unwrap() } fn send_message( &self, request: BackendReq, body: &T, fds: Option<&[RawFd]>, ) -> io::Result { self.node() .send_message(request, body, fds) .map_err(|e| io::Error::new(io::ErrorKind::Other, format!("{}", e))) } /// Create a new instance from a `UnixStream` object. pub fn from_stream(sock: UnixStream) -> Self { Self::new(Endpoint::::from_stream(sock)) } /// Set the negotiation state of the `VHOST_USER_PROTOCOL_F_REPLY_ACK` protocol feature. /// /// When the `VHOST_USER_PROTOCOL_F_REPLY_ACK` protocol feature has been negotiated, /// the "REPLY_ACK" flag will be set in the message header for every backend to frontend request /// message. pub fn set_reply_ack_flag(&self, enable: bool) { self.node().reply_ack_negotiated = enable; } /// Mark endpoint as failed with specified error code. pub fn set_failed(&self, error: i32) { self.node().error = Some(error); } } impl VhostUserFrontendReqHandler for Backend { /// Forward vhost-user-fs map file requests to the backend. fn fs_backend_map(&self, fs: &VhostUserFSBackendMsg, fd: &dyn AsRawFd) -> HandlerResult { self.send_message(BackendReq::FS_MAP, fs, Some(&[fd.as_raw_fd()])) } /// Forward vhost-user-fs unmap file requests to the frontend. 
fn fs_backend_unmap(&self, fs: &VhostUserFSBackendMsg) -> HandlerResult { self.send_message(BackendReq::FS_UNMAP, fs, None) } } #[cfg(test)] mod tests { use std::os::unix::io::AsRawFd; use super::*; #[test] fn test_backend_req_set_failed() { let (p1, _p2) = UnixStream::pair().unwrap(); let backend = Backend::from_stream(p1); assert!(backend.node().error.is_none()); backend.set_failed(libc::EAGAIN); assert_eq!(backend.node().error, Some(libc::EAGAIN)); } #[test] fn test_backend_req_send_failure() { let (p1, p2) = UnixStream::pair().unwrap(); let backend = Backend::from_stream(p1); backend.set_failed(libc::ECONNRESET); backend .fs_backend_map(&VhostUserFSBackendMsg::default(), &p2) .unwrap_err(); backend .fs_backend_unmap(&VhostUserFSBackendMsg::default()) .unwrap_err(); backend.node().error = None; } #[test] fn test_backend_req_recv_negative() { let (p1, p2) = UnixStream::pair().unwrap(); let backend = Backend::from_stream(p1); let mut frontend = Endpoint::::from_stream(p2); let len = mem::size_of::(); let mut hdr = VhostUserMsgHeader::new( BackendReq::FS_MAP, VhostUserHeaderFlag::REPLY.bits(), len as u32, ); let body = VhostUserU64::new(0); frontend .send_message(&hdr, &body, Some(&[frontend.as_raw_fd()])) .unwrap(); backend .fs_backend_map(&VhostUserFSBackendMsg::default(), &frontend) .unwrap(); backend.set_reply_ack_flag(true); backend .fs_backend_map(&VhostUserFSBackendMsg::default(), &frontend) .unwrap_err(); hdr.set_code(BackendReq::FS_UNMAP); frontend.send_message(&hdr, &body, None).unwrap(); backend .fs_backend_map(&VhostUserFSBackendMsg::default(), &frontend) .unwrap_err(); hdr.set_code(BackendReq::FS_MAP); let body = VhostUserU64::new(1); frontend.send_message(&hdr, &body, None).unwrap(); backend .fs_backend_map(&VhostUserFSBackendMsg::default(), &frontend) .unwrap_err(); let body = VhostUserU64::new(0); frontend.send_message(&hdr, &body, None).unwrap(); backend .fs_backend_map(&VhostUserFSBackendMsg::default(), &frontend) .unwrap(); } } 
vhost-0.10.0/src/vhost_user/backend_req_handler.rs000064400000000000000000001017021046102023000203370ustar 00000000000000// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved. // SPDX-License-Identifier: Apache-2.0 use std::fs::File; use std::mem; use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd}; use std::os::unix::net::UnixStream; use std::slice; use std::sync::{Arc, Mutex}; use vm_memory::ByteValued; use super::backend_req::Backend; use super::connection::Endpoint; use super::message::*; use super::{take_single_file, Error, Result}; /// Services provided to the frontend by the backend with interior mutability. /// /// The [VhostUserBackendReqHandler] trait defines the services provided to the frontend by the backend. /// And the [VhostUserBackendReqHandlerMut] trait is a helper mirroring [VhostUserBackendReqHandler], /// but without interior mutability. /// The vhost-user specification defines a frontend communication channel, by which frontends could /// request services from backends. The [VhostUserBackendReqHandler] trait defines services provided by /// backends, and it's used both on the frontend side and backend side. /// /// - on the frontend side, a stub forwarder implementing [VhostUserBackendReqHandler] will proxy /// service requests to backends. /// - on the backend side, the [BackendReqHandler] will forward service requests to a handler /// implementing [VhostUserBackendReqHandler]. /// /// The [VhostUserBackendReqHandler] trait is design with interior mutability to improve performance /// for multi-threading. 
/// /// [VhostUserBackendReqHandler]: trait.VhostUserBackendReqHandler.html /// [VhostUserBackendReqHandlerMut]: trait.VhostUserBackendReqHandlerMut.html /// [BackendReqHandler]: struct.BackendReqHandler.html #[allow(missing_docs)] pub trait VhostUserBackendReqHandler { fn set_owner(&self) -> Result<()>; fn reset_owner(&self) -> Result<()>; fn get_features(&self) -> Result; fn set_features(&self, features: u64) -> Result<()>; fn set_mem_table(&self, ctx: &[VhostUserMemoryRegion], files: Vec) -> Result<()>; fn set_vring_num(&self, index: u32, num: u32) -> Result<()>; fn set_vring_addr( &self, index: u32, flags: VhostUserVringAddrFlags, descriptor: u64, used: u64, available: u64, log: u64, ) -> Result<()>; fn set_vring_base(&self, index: u32, base: u32) -> Result<()>; fn get_vring_base(&self, index: u32) -> Result; fn set_vring_kick(&self, index: u8, fd: Option) -> Result<()>; fn set_vring_call(&self, index: u8, fd: Option) -> Result<()>; fn set_vring_err(&self, index: u8, fd: Option) -> Result<()>; fn get_protocol_features(&self) -> Result; fn set_protocol_features(&self, features: u64) -> Result<()>; fn get_queue_num(&self) -> Result; fn set_vring_enable(&self, index: u32, enable: bool) -> Result<()>; fn get_config(&self, offset: u32, size: u32, flags: VhostUserConfigFlags) -> Result>; fn set_config(&self, offset: u32, buf: &[u8], flags: VhostUserConfigFlags) -> Result<()>; fn set_backend_req_fd(&self, _backend: Backend) {} fn get_inflight_fd(&self, inflight: &VhostUserInflight) -> Result<(VhostUserInflight, File)>; fn set_inflight_fd(&self, inflight: &VhostUserInflight, file: File) -> Result<()>; fn get_max_mem_slots(&self) -> Result; fn add_mem_region(&self, region: &VhostUserSingleMemoryRegion, fd: File) -> Result<()>; fn remove_mem_region(&self, region: &VhostUserSingleMemoryRegion) -> Result<()>; } /// Services provided to the frontend by the backend without interior mutability. /// /// This is a helper trait mirroring the [VhostUserBackendReqHandler] trait. 
#[allow(missing_docs)] pub trait VhostUserBackendReqHandlerMut { fn set_owner(&mut self) -> Result<()>; fn reset_owner(&mut self) -> Result<()>; fn get_features(&mut self) -> Result; fn set_features(&mut self, features: u64) -> Result<()>; fn set_mem_table(&mut self, ctx: &[VhostUserMemoryRegion], files: Vec) -> Result<()>; fn set_vring_num(&mut self, index: u32, num: u32) -> Result<()>; fn set_vring_addr( &mut self, index: u32, flags: VhostUserVringAddrFlags, descriptor: u64, used: u64, available: u64, log: u64, ) -> Result<()>; fn set_vring_base(&mut self, index: u32, base: u32) -> Result<()>; fn get_vring_base(&mut self, index: u32) -> Result; fn set_vring_kick(&mut self, index: u8, fd: Option) -> Result<()>; fn set_vring_call(&mut self, index: u8, fd: Option) -> Result<()>; fn set_vring_err(&mut self, index: u8, fd: Option) -> Result<()>; fn get_protocol_features(&mut self) -> Result; fn set_protocol_features(&mut self, features: u64) -> Result<()>; fn get_queue_num(&mut self) -> Result; fn set_vring_enable(&mut self, index: u32, enable: bool) -> Result<()>; fn get_config( &mut self, offset: u32, size: u32, flags: VhostUserConfigFlags, ) -> Result>; fn set_config(&mut self, offset: u32, buf: &[u8], flags: VhostUserConfigFlags) -> Result<()>; fn set_backend_req_fd(&mut self, _backend: Backend) {} fn get_inflight_fd( &mut self, inflight: &VhostUserInflight, ) -> Result<(VhostUserInflight, File)>; fn set_inflight_fd(&mut self, inflight: &VhostUserInflight, file: File) -> Result<()>; fn get_max_mem_slots(&mut self) -> Result; fn add_mem_region(&mut self, region: &VhostUserSingleMemoryRegion, fd: File) -> Result<()>; fn remove_mem_region(&mut self, region: &VhostUserSingleMemoryRegion) -> Result<()>; } impl VhostUserBackendReqHandler for Mutex { fn set_owner(&self) -> Result<()> { self.lock().unwrap().set_owner() } fn reset_owner(&self) -> Result<()> { self.lock().unwrap().reset_owner() } fn get_features(&self) -> Result { self.lock().unwrap().get_features() } fn 
set_features(&self, features: u64) -> Result<()> { self.lock().unwrap().set_features(features) } fn set_mem_table(&self, ctx: &[VhostUserMemoryRegion], files: Vec) -> Result<()> { self.lock().unwrap().set_mem_table(ctx, files) } fn set_vring_num(&self, index: u32, num: u32) -> Result<()> { self.lock().unwrap().set_vring_num(index, num) } fn set_vring_addr( &self, index: u32, flags: VhostUserVringAddrFlags, descriptor: u64, used: u64, available: u64, log: u64, ) -> Result<()> { self.lock() .unwrap() .set_vring_addr(index, flags, descriptor, used, available, log) } fn set_vring_base(&self, index: u32, base: u32) -> Result<()> { self.lock().unwrap().set_vring_base(index, base) } fn get_vring_base(&self, index: u32) -> Result { self.lock().unwrap().get_vring_base(index) } fn set_vring_kick(&self, index: u8, fd: Option) -> Result<()> { self.lock().unwrap().set_vring_kick(index, fd) } fn set_vring_call(&self, index: u8, fd: Option) -> Result<()> { self.lock().unwrap().set_vring_call(index, fd) } fn set_vring_err(&self, index: u8, fd: Option) -> Result<()> { self.lock().unwrap().set_vring_err(index, fd) } fn get_protocol_features(&self) -> Result { self.lock().unwrap().get_protocol_features() } fn set_protocol_features(&self, features: u64) -> Result<()> { self.lock().unwrap().set_protocol_features(features) } fn get_queue_num(&self) -> Result { self.lock().unwrap().get_queue_num() } fn set_vring_enable(&self, index: u32, enable: bool) -> Result<()> { self.lock().unwrap().set_vring_enable(index, enable) } fn get_config(&self, offset: u32, size: u32, flags: VhostUserConfigFlags) -> Result> { self.lock().unwrap().get_config(offset, size, flags) } fn set_config(&self, offset: u32, buf: &[u8], flags: VhostUserConfigFlags) -> Result<()> { self.lock().unwrap().set_config(offset, buf, flags) } fn set_backend_req_fd(&self, backend: Backend) { self.lock().unwrap().set_backend_req_fd(backend) } fn get_inflight_fd(&self, inflight: &VhostUserInflight) -> Result<(VhostUserInflight, 
File)> { self.lock().unwrap().get_inflight_fd(inflight) } fn set_inflight_fd(&self, inflight: &VhostUserInflight, file: File) -> Result<()> { self.lock().unwrap().set_inflight_fd(inflight, file) } fn get_max_mem_slots(&self) -> Result { self.lock().unwrap().get_max_mem_slots() } fn add_mem_region(&self, region: &VhostUserSingleMemoryRegion, fd: File) -> Result<()> { self.lock().unwrap().add_mem_region(region, fd) } fn remove_mem_region(&self, region: &VhostUserSingleMemoryRegion) -> Result<()> { self.lock().unwrap().remove_mem_region(region) } } /// Server to handle service requests from frontends from the frontend communication channel. /// /// The [BackendReqHandler] acts as a server on the backend side, to handle service requests from /// frontends on the frontend communication channel. It's actually a proxy invoking the registered /// handler implementing [VhostUserBackendReqHandler] to do the real work. /// /// The lifetime of the BackendReqHandler object should be the same as the underline Unix Domain /// Socket, so it gets simpler to recover from disconnect. /// /// [VhostUserBackendReqHandler]: trait.VhostUserBackendReqHandler.html /// [BackendReqHandler]: struct.BackendReqHandler.html pub struct BackendReqHandler { // underlying Unix domain socket for communication main_sock: Endpoint, // the vhost-user backend device object backend: Arc, virtio_features: u64, acked_virtio_features: u64, protocol_features: VhostUserProtocolFeatures, acked_protocol_features: u64, // sending ack for messages without payload reply_ack_enabled: bool, // whether the endpoint has encountered any failure error: Option, } impl BackendReqHandler { /// Create a vhost-user backend endpoint. 
pub(super) fn new(main_sock: Endpoint, backend: Arc) -> Self { BackendReqHandler { main_sock, backend, virtio_features: 0, acked_virtio_features: 0, protocol_features: VhostUserProtocolFeatures::empty(), acked_protocol_features: 0, reply_ack_enabled: false, error: None, } } fn check_feature(&self, feat: VhostUserVirtioFeatures) -> Result<()> { if self.acked_virtio_features & feat.bits() != 0 { Ok(()) } else { Err(Error::InactiveFeature(feat)) } } fn check_proto_feature(&self, feat: VhostUserProtocolFeatures) -> Result<()> { if self.acked_protocol_features & feat.bits() != 0 { Ok(()) } else { Err(Error::InactiveOperation(feat)) } } /// Create a vhost-user backend endpoint from a connected socket. pub fn from_stream(socket: UnixStream, backend: Arc) -> Self { Self::new(Endpoint::from_stream(socket), backend) } /// Create a new vhost-user backend endpoint. /// /// # Arguments /// * - `path` - path of Unix domain socket listener to connect to /// * - `backend` - handler for requests from the frontend to the backend pub fn connect(path: &str, backend: Arc) -> Result { Ok(Self::new(Endpoint::::connect(path)?, backend)) } /// Mark endpoint as failed with specified error code. pub fn set_failed(&mut self, error: i32) { self.error = Some(error); } /// Main entrance to server backend request from the backend communication channel. /// /// Receive and handle one incoming request message from the frontend. The caller needs to: /// - serialize calls to this function /// - decide what to do when error happens /// - optional recover from failure pub fn handle_request(&mut self) -> Result<()> { // Return error if the endpoint is already in failed state. self.check_state()?; // The underlying communication channel is a Unix domain socket in // stream mode, and recvmsg() is a little tricky here. To successfully // receive attached file descriptors, we need to receive messages and // corresponding attached file descriptors in this way: // . 
recv messsage header and optional attached file // . validate message header // . recv optional message body and payload according size field in // message header // . validate message body and optional payload let (hdr, files) = self.main_sock.recv_header()?; self.check_attached_files(&hdr, &files)?; let (size, buf) = match hdr.get_size() { 0 => (0, vec![0u8; 0]), len => { let (size2, rbuf) = self.main_sock.recv_data(len as usize)?; if size2 != len as usize { return Err(Error::InvalidMessage); } (size2, rbuf) } }; match hdr.get_code() { Ok(FrontendReq::SET_OWNER) => { self.check_request_size(&hdr, size, 0)?; let res = self.backend.set_owner(); self.send_ack_message(&hdr, res)?; } Ok(FrontendReq::RESET_OWNER) => { self.check_request_size(&hdr, size, 0)?; let res = self.backend.reset_owner(); self.send_ack_message(&hdr, res)?; } Ok(FrontendReq::GET_FEATURES) => { self.check_request_size(&hdr, size, 0)?; let features = self.backend.get_features()?; let msg = VhostUserU64::new(features); self.send_reply_message(&hdr, &msg)?; self.virtio_features = features; self.update_reply_ack_flag(); } Ok(FrontendReq::SET_FEATURES) => { let msg = self.extract_request_body::(&hdr, size, &buf)?; let res = self.backend.set_features(msg.value); self.acked_virtio_features = msg.value; self.update_reply_ack_flag(); self.send_ack_message(&hdr, res)?; } Ok(FrontendReq::SET_MEM_TABLE) => { let res = self.set_mem_table(&hdr, size, &buf, files); self.send_ack_message(&hdr, res)?; } Ok(FrontendReq::SET_VRING_NUM) => { let msg = self.extract_request_body::(&hdr, size, &buf)?; let res = self.backend.set_vring_num(msg.index, msg.num); self.send_ack_message(&hdr, res)?; } Ok(FrontendReq::SET_VRING_ADDR) => { let msg = self.extract_request_body::(&hdr, size, &buf)?; let flags = match VhostUserVringAddrFlags::from_bits(msg.flags) { Some(val) => val, None => return Err(Error::InvalidMessage), }; let res = self.backend.set_vring_addr( msg.index, flags, msg.descriptor, msg.used, msg.available, msg.log, 
); self.send_ack_message(&hdr, res)?; } Ok(FrontendReq::SET_VRING_BASE) => { let msg = self.extract_request_body::(&hdr, size, &buf)?; let res = self.backend.set_vring_base(msg.index, msg.num); self.send_ack_message(&hdr, res)?; } Ok(FrontendReq::GET_VRING_BASE) => { let msg = self.extract_request_body::(&hdr, size, &buf)?; let reply = self.backend.get_vring_base(msg.index)?; self.send_reply_message(&hdr, &reply)?; } Ok(FrontendReq::SET_VRING_CALL) => { self.check_request_size(&hdr, size, mem::size_of::())?; let (index, file) = self.handle_vring_fd_request(&buf, files)?; let res = self.backend.set_vring_call(index, file); self.send_ack_message(&hdr, res)?; } Ok(FrontendReq::SET_VRING_KICK) => { self.check_request_size(&hdr, size, mem::size_of::())?; let (index, file) = self.handle_vring_fd_request(&buf, files)?; let res = self.backend.set_vring_kick(index, file); self.send_ack_message(&hdr, res)?; } Ok(FrontendReq::SET_VRING_ERR) => { self.check_request_size(&hdr, size, mem::size_of::())?; let (index, file) = self.handle_vring_fd_request(&buf, files)?; let res = self.backend.set_vring_err(index, file); self.send_ack_message(&hdr, res)?; } Ok(FrontendReq::GET_PROTOCOL_FEATURES) => { self.check_request_size(&hdr, size, 0)?; let features = self.backend.get_protocol_features()?; // Enable the `XEN_MMAP` protocol feature for backends if xen feature is enabled. 
#[cfg(feature = "xen")] let features = features | VhostUserProtocolFeatures::XEN_MMAP; let msg = VhostUserU64::new(features.bits()); self.send_reply_message(&hdr, &msg)?; self.protocol_features = features; self.update_reply_ack_flag(); } Ok(FrontendReq::SET_PROTOCOL_FEATURES) => { let msg = self.extract_request_body::(&hdr, size, &buf)?; let res = self.backend.set_protocol_features(msg.value); self.acked_protocol_features = msg.value; self.update_reply_ack_flag(); self.send_ack_message(&hdr, res)?; #[cfg(feature = "xen")] self.check_proto_feature(VhostUserProtocolFeatures::XEN_MMAP)?; } Ok(FrontendReq::GET_QUEUE_NUM) => { self.check_proto_feature(VhostUserProtocolFeatures::MQ)?; self.check_request_size(&hdr, size, 0)?; let num = self.backend.get_queue_num()?; let msg = VhostUserU64::new(num); self.send_reply_message(&hdr, &msg)?; } Ok(FrontendReq::SET_VRING_ENABLE) => { let msg = self.extract_request_body::(&hdr, size, &buf)?; self.check_feature(VhostUserVirtioFeatures::PROTOCOL_FEATURES)?; let enable = match msg.num { 1 => true, 0 => false, _ => return Err(Error::InvalidParam), }; let res = self.backend.set_vring_enable(msg.index, enable); self.send_ack_message(&hdr, res)?; } Ok(FrontendReq::GET_CONFIG) => { self.check_proto_feature(VhostUserProtocolFeatures::CONFIG)?; self.check_request_size(&hdr, size, hdr.get_size() as usize)?; self.get_config(&hdr, &buf)?; } Ok(FrontendReq::SET_CONFIG) => { self.check_proto_feature(VhostUserProtocolFeatures::CONFIG)?; self.check_request_size(&hdr, size, hdr.get_size() as usize)?; let res = self.set_config(size, &buf); self.send_ack_message(&hdr, res)?; } Ok(FrontendReq::SET_BACKEND_REQ_FD) => { self.check_proto_feature(VhostUserProtocolFeatures::BACKEND_REQ)?; self.check_request_size(&hdr, size, hdr.get_size() as usize)?; let res = self.set_backend_req_fd(files); self.send_ack_message(&hdr, res)?; } Ok(FrontendReq::GET_INFLIGHT_FD) => { self.check_proto_feature(VhostUserProtocolFeatures::INFLIGHT_SHMFD)?; let msg = 
self.extract_request_body::(&hdr, size, &buf)?; let (inflight, file) = self.backend.get_inflight_fd(&msg)?; let reply_hdr = self.new_reply_header::(&hdr, 0)?; self.main_sock .send_message(&reply_hdr, &inflight, Some(&[file.as_raw_fd()]))?; } Ok(FrontendReq::SET_INFLIGHT_FD) => { self.check_proto_feature(VhostUserProtocolFeatures::INFLIGHT_SHMFD)?; let file = take_single_file(files).ok_or(Error::IncorrectFds)?; let msg = self.extract_request_body::(&hdr, size, &buf)?; let res = self.backend.set_inflight_fd(&msg, file); self.send_ack_message(&hdr, res)?; } Ok(FrontendReq::GET_MAX_MEM_SLOTS) => { self.check_proto_feature(VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS)?; self.check_request_size(&hdr, size, 0)?; let num = self.backend.get_max_mem_slots()?; let msg = VhostUserU64::new(num); self.send_reply_message(&hdr, &msg)?; } Ok(FrontendReq::ADD_MEM_REG) => { self.check_proto_feature(VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS)?; let mut files = files.ok_or(Error::InvalidParam)?; if files.len() != 1 { return Err(Error::InvalidParam); } let msg = self.extract_request_body::(&hdr, size, &buf)?; let res = self.backend.add_mem_region(&msg, files.swap_remove(0)); self.send_ack_message(&hdr, res)?; } Ok(FrontendReq::REM_MEM_REG) => { self.check_proto_feature(VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS)?; let msg = self.extract_request_body::(&hdr, size, &buf)?; let res = self.backend.remove_mem_region(&msg); self.send_ack_message(&hdr, res)?; } _ => { return Err(Error::InvalidMessage); } } Ok(()) } fn set_mem_table( &mut self, hdr: &VhostUserMsgHeader, size: usize, buf: &[u8], files: Option>, ) -> Result<()> { self.check_request_size(hdr, size, hdr.get_size() as usize)?; // check message size is consistent let hdrsize = mem::size_of::(); if size < hdrsize { return Err(Error::InvalidMessage); } // SAFETY: Safe because we checked that `buf` size is at least that of // VhostUserMemory. 
let msg = unsafe { &*(buf.as_ptr() as *const VhostUserMemory) }; if !msg.is_valid() { return Err(Error::InvalidMessage); } if size != hdrsize + msg.num_regions as usize * mem::size_of::() { return Err(Error::InvalidMessage); } // validate number of fds matching number of memory regions let files = files.ok_or(Error::InvalidMessage)?; if files.len() != msg.num_regions as usize { return Err(Error::InvalidMessage); } // Validate memory regions // // SAFETY: Safe because we checked that `buf` size is equal to that of // VhostUserMemory, plus `msg.num_regions` elements of VhostUserMemoryRegion. let regions = unsafe { slice::from_raw_parts( buf.as_ptr().add(hdrsize) as *const VhostUserMemoryRegion, msg.num_regions as usize, ) }; for region in regions.iter() { if !region.is_valid() { return Err(Error::InvalidMessage); } } self.backend.set_mem_table(regions, files) } fn get_config(&mut self, hdr: &VhostUserMsgHeader, buf: &[u8]) -> Result<()> { let payload_offset = mem::size_of::(); if buf.len() > MAX_MSG_SIZE || buf.len() < payload_offset { return Err(Error::InvalidMessage); } // SAFETY: Safe because we checked that `buf` size is at least that of VhostUserConfig. let msg = unsafe { std::ptr::read_unaligned(buf.as_ptr() as *const VhostUserConfig) }; if !msg.is_valid() { return Err(Error::InvalidMessage); } if buf.len() - payload_offset != msg.size as usize { return Err(Error::InvalidMessage); } let flags = match VhostUserConfigFlags::from_bits(msg.flags) { Some(val) => val, None => return Err(Error::InvalidMessage), }; let res = self.backend.get_config(msg.offset, msg.size, flags); // vhost-user backend's payload size MUST match frontend's request // on success, uses zero length of payload to indicate an error // to vhost-user frontend. 
match res { Ok(ref buf) if buf.len() == msg.size as usize => { let reply = VhostUserConfig::new(msg.offset, buf.len() as u32, flags); self.send_reply_with_payload(hdr, &reply, buf.as_slice())?; } Ok(_) => { let reply = VhostUserConfig::new(msg.offset, 0, flags); self.send_reply_message(hdr, &reply)?; } Err(_) => { let reply = VhostUserConfig::new(msg.offset, 0, flags); self.send_reply_message(hdr, &reply)?; } } Ok(()) } fn set_config(&mut self, size: usize, buf: &[u8]) -> Result<()> { if size > MAX_MSG_SIZE || size < mem::size_of::() { return Err(Error::InvalidMessage); } // SAFETY: Safe because we checked that `buf` size is at least that of VhostUserConfig. let msg = unsafe { std::ptr::read_unaligned(buf.as_ptr() as *const VhostUserConfig) }; if !msg.is_valid() { return Err(Error::InvalidMessage); } if size - mem::size_of::() != msg.size as usize { return Err(Error::InvalidMessage); } let flags = VhostUserConfigFlags::from_bits(msg.flags).ok_or(Error::InvalidMessage)?; self.backend .set_config(msg.offset, &buf[mem::size_of::()..], flags) } fn set_backend_req_fd(&mut self, files: Option>) -> Result<()> { let file = take_single_file(files).ok_or(Error::InvalidMessage)?; // SAFETY: Safe because we have ownership of the files that were // checked when received. We have to trust that they are Unix sockets // since we have no way to check this. If not, it will fail later. let sock = unsafe { UnixStream::from_raw_fd(file.into_raw_fd()) }; let backend = Backend::from_stream(sock); self.backend.set_backend_req_fd(backend); Ok(()) } fn handle_vring_fd_request( &mut self, buf: &[u8], files: Option>, ) -> Result<(u8, Option)> { if buf.len() > MAX_MSG_SIZE || buf.len() < mem::size_of::() { return Err(Error::InvalidMessage); } // SAFETY: Safe because we checked that `buf` size is at least that of VhostUserU64. 
let msg = unsafe { std::ptr::read_unaligned(buf.as_ptr() as *const VhostUserU64) }; if !msg.is_valid() { return Err(Error::InvalidMessage); } // Bits (0-7) of the payload contain the vring index. Bit 8 is the // invalid FD flag. This bit is set when there is no file descriptor // in the ancillary data. This signals that polling will be used // instead of waiting for the call. // If Bit 8 is unset, the data must contain a file descriptor. let has_fd = (msg.value & 0x100u64) == 0; let file = take_single_file(files); if has_fd && file.is_none() || !has_fd && file.is_some() { return Err(Error::InvalidMessage); } Ok((msg.value as u8, file)) } fn check_state(&self) -> Result<()> { match self.error { Some(e) => Err(Error::SocketBroken(std::io::Error::from_raw_os_error(e))), None => Ok(()), } } fn check_request_size( &self, hdr: &VhostUserMsgHeader, size: usize, expected: usize, ) -> Result<()> { if hdr.get_size() as usize != expected || hdr.is_reply() || hdr.get_version() != 0x1 || size != expected { return Err(Error::InvalidMessage); } Ok(()) } fn check_attached_files( &self, hdr: &VhostUserMsgHeader, files: &Option>, ) -> Result<()> { match hdr.get_code() { Ok( FrontendReq::SET_MEM_TABLE | FrontendReq::SET_VRING_CALL | FrontendReq::SET_VRING_KICK | FrontendReq::SET_VRING_ERR | FrontendReq::SET_LOG_BASE | FrontendReq::SET_LOG_FD | FrontendReq::SET_BACKEND_REQ_FD | FrontendReq::SET_INFLIGHT_FD | FrontendReq::ADD_MEM_REG, ) => Ok(()), _ if files.is_some() => Err(Error::InvalidMessage), _ => Ok(()), } } fn extract_request_body( &self, hdr: &VhostUserMsgHeader, size: usize, buf: &[u8], ) -> Result { self.check_request_size(hdr, size, mem::size_of::())?; // SAFETY: Safe because we checked that `buf` size is equal to T size. 
let msg = unsafe { std::ptr::read_unaligned(buf.as_ptr() as *const T) }; if !msg.is_valid() { return Err(Error::InvalidMessage); } Ok(msg) } fn update_reply_ack_flag(&mut self) { let vflag = VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits(); let pflag = VhostUserProtocolFeatures::REPLY_ACK; self.reply_ack_enabled = (self.virtio_features & vflag) != 0 && self.protocol_features.contains(pflag) && (self.acked_protocol_features & pflag.bits()) != 0; } fn new_reply_header( &self, req: &VhostUserMsgHeader, payload_size: usize, ) -> Result> { if mem::size_of::() > MAX_MSG_SIZE || payload_size > MAX_MSG_SIZE || mem::size_of::() + payload_size > MAX_MSG_SIZE { return Err(Error::InvalidParam); } self.check_state()?; Ok(VhostUserMsgHeader::new( req.get_code()?, VhostUserHeaderFlag::REPLY.bits(), (mem::size_of::() + payload_size) as u32, )) } fn send_ack_message( &mut self, req: &VhostUserMsgHeader, res: Result<()>, ) -> Result<()> { if self.reply_ack_enabled && req.is_need_reply() { let hdr = self.new_reply_header::(req, 0)?; let val = match res { Ok(_) => 0, Err(_) => 1, }; let msg = VhostUserU64::new(val); self.main_sock.send_message(&hdr, &msg, None)?; } res } fn send_reply_message( &mut self, req: &VhostUserMsgHeader, msg: &T, ) -> Result<()> { let hdr = self.new_reply_header::(req, 0)?; self.main_sock.send_message(&hdr, msg, None)?; Ok(()) } fn send_reply_with_payload( &mut self, req: &VhostUserMsgHeader, msg: &T, payload: &[u8], ) -> Result<()> { let hdr = self.new_reply_header::(req, payload.len())?; self.main_sock .send_message_with_payload(&hdr, msg, payload, None)?; Ok(()) } } impl AsRawFd for BackendReqHandler { fn as_raw_fd(&self) -> RawFd { self.main_sock.as_raw_fd() } } #[cfg(test)] mod tests { use std::os::unix::io::AsRawFd; use super::*; use crate::vhost_user::dummy_backend::DummyBackendReqHandler; #[test] fn test_backend_req_handler_new() { let (p1, _p2) = UnixStream::pair().unwrap(); let endpoint = Endpoint::::from_stream(p1); let backend = 
Arc::new(Mutex::new(DummyBackendReqHandler::new())); let mut handler = BackendReqHandler::new(endpoint, backend); handler.check_state().unwrap(); handler.set_failed(libc::EAGAIN); handler.check_state().unwrap_err(); assert!(handler.as_raw_fd() >= 0); } } vhost-0.10.0/src/vhost_user/connection.rs000064400000000000000000001016671046102023000165550ustar 00000000000000// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved. // SPDX-License-Identifier: Apache-2.0 //! Structs for Unix Domain Socket listener and endpoint. #![allow(dead_code)] use std::fs::File; use std::io::ErrorKind; use std::marker::PhantomData; use std::os::unix::io::{AsRawFd, FromRawFd, RawFd}; use std::os::unix::net::{UnixListener, UnixStream}; use std::path::{Path, PathBuf}; use std::{mem, slice}; use libc::{c_void, iovec}; use vm_memory::ByteValued; use vmm_sys_util::sock_ctrl_msg::ScmSocket; use super::message::*; use super::{Error, Result}; /// Unix domain socket listener for accepting incoming connections. pub struct Listener { fd: UnixListener, path: Option, } impl Listener { /// Create a unix domain socket listener. /// /// # Return: /// * - the new Listener object on success. /// * - SocketError: failed to create listener socket. pub fn new>(path: P, unlink: bool) -> Result { if unlink { let _ = std::fs::remove_file(&path); } let fd = UnixListener::bind(&path).map_err(Error::SocketError)?; Ok(Listener { fd, path: Some(path.as_ref().to_owned()), }) } /// Accept an incoming connection. /// /// # Return: /// * - Some(UnixStream): new UnixStream object if new incoming connection is available. /// * - None: no incoming connection available. /// * - SocketError: errors from accept(). pub fn accept(&self) -> Result> { loop { match self.fd.accept() { Ok((socket, _addr)) => return Ok(Some(socket)), Err(e) => { match e.kind() { // No incoming connection available. ErrorKind::WouldBlock => return Ok(None), // New connection closed by peer. 
ErrorKind::ConnectionAborted => return Ok(None), // Interrupted by signals, retry ErrorKind::Interrupted => continue, _ => return Err(Error::SocketError(e)), } } } } } /// Change blocking status on the listener. /// /// # Return: /// * - () on success. /// * - SocketError: failure from set_nonblocking(). pub fn set_nonblocking(&self, block: bool) -> Result<()> { self.fd.set_nonblocking(block).map_err(Error::SocketError) } } impl AsRawFd for Listener { fn as_raw_fd(&self) -> RawFd { self.fd.as_raw_fd() } } impl FromRawFd for Listener { unsafe fn from_raw_fd(fd: RawFd) -> Self { Listener { fd: UnixListener::from_raw_fd(fd), path: None, } } } impl Drop for Listener { fn drop(&mut self) { if let Some(path) = &self.path { let _ = std::fs::remove_file(path); } } } /// Unix domain socket endpoint for vhost-user connection. pub(super) struct Endpoint { sock: UnixStream, _r: PhantomData, } impl Endpoint { /// Create a new stream by connecting to server at `str`. /// /// # Return: /// * - the new Endpoint object on success. /// * - SocketConnect: failed to connect to peer. pub fn connect>(path: P) -> Result { let sock = UnixStream::connect(path).map_err(Error::SocketConnect)?; Ok(Self::from_stream(sock)) } /// Create an endpoint from a stream object. pub fn from_stream(sock: UnixStream) -> Self { Endpoint { sock, _r: PhantomData, } } /// Sends bytes from scatter-gather vectors over the socket with optional attached file /// descriptors. /// /// # Return: /// * - number of bytes sent on success /// * - SocketRetry: temporary error caused by signals or short of resources. /// * - SocketBroken: the underline socket is broken. /// * - SocketError: other socket related errors. pub fn send_iovec(&mut self, iovs: &[&[u8]], fds: Option<&[RawFd]>) -> Result { let rfds = match fds { Some(rfds) => rfds, _ => &[], }; self.sock.send_with_fds(iovs, rfds).map_err(Into::into) } /// Sends all bytes from scatter-gather vectors over the socket with optional attached file /// descriptors. 
Will loop until all data has been transfered. /// /// # Return: /// * - number of bytes sent on success /// * - SocketBroken: the underline socket is broken. /// * - SocketError: other socket related errors. pub fn send_iovec_all(&mut self, iovs: &[&[u8]], fds: Option<&[RawFd]>) -> Result { let mut data_sent = 0; let mut data_total = 0; let iov_lens: Vec = iovs.iter().map(|iov| iov.len()).collect(); for len in &iov_lens { data_total += len; } while (data_total - data_sent) > 0 { let (nr_skip, offset) = get_sub_iovs_offset(&iov_lens, data_sent); let iov = &iovs[nr_skip][offset..]; let data = &[&[iov], &iovs[(nr_skip + 1)..]].concat(); let sfds = if data_sent == 0 { fds } else { None }; let sent = self.send_iovec(data, sfds); match sent { Ok(0) => return Ok(data_sent), Ok(n) => data_sent += n, Err(e) => match e { Error::SocketRetry(_) => {} _ => return Err(e), }, } } Ok(data_sent) } /// Sends bytes from a slice over the socket with optional attached file descriptors. /// /// # Return: /// * - number of bytes sent on success /// * - SocketRetry: temporary error caused by signals or short of resources. /// * - SocketBroken: the underline socket is broken. /// * - SocketError: other socket related errors. pub fn send_slice(&mut self, data: &[u8], fds: Option<&[RawFd]>) -> Result { self.send_iovec(&[data], fds) } /// Sends a header-only message with optional attached file descriptors. /// /// # Return: /// * - number of bytes sent on success /// * - SocketRetry: temporary error caused by signals or short of resources. /// * - SocketBroken: the underline socket is broken. /// * - SocketError: other socket related errors. /// * - PartialMessage: received a partial message. pub fn send_header( &mut self, hdr: &VhostUserMsgHeader, fds: Option<&[RawFd]>, ) -> Result<()> { // SAFETY: Safe because there can't be other mutable referance to hdr. 
let iovs = unsafe { [slice::from_raw_parts( hdr as *const VhostUserMsgHeader as *const u8, mem::size_of::>(), )] }; let bytes = self.send_iovec_all(&iovs[..], fds)?; if bytes != mem::size_of::>() { return Err(Error::PartialMessage); } Ok(()) } /// Send a message with header and body. Optional file descriptors may be attached to /// the message. /// /// # Return: /// * - number of bytes sent on success /// * - SocketRetry: temporary error caused by signals or short of resources. /// * - SocketBroken: the underline socket is broken. /// * - SocketError: other socket related errors. /// * - PartialMessage: received a partial message. pub fn send_message( &mut self, hdr: &VhostUserMsgHeader, body: &T, fds: Option<&[RawFd]>, ) -> Result<()> { if mem::size_of::() > MAX_MSG_SIZE { return Err(Error::OversizedMsg); } let bytes = self.send_iovec_all(&[hdr.as_slice(), body.as_slice()], fds)?; if bytes != mem::size_of::>() + mem::size_of::() { return Err(Error::PartialMessage); } Ok(()) } /// Send a message with header, body and payload. Optional file descriptors /// may also be attached to the message. /// /// # Return: /// * - number of bytes sent on success /// * - SocketRetry: temporary error caused by signals or short of resources. /// * - SocketBroken: the underline socket is broken. /// * - SocketError: other socket related errors. /// * - OversizedMsg: message size is too big. /// * - PartialMessage: received a partial message. /// * - IncorrectFds: wrong number of attached fds. 
pub fn send_message_with_payload( &mut self, hdr: &VhostUserMsgHeader, body: &T, payload: &[u8], fds: Option<&[RawFd]>, ) -> Result<()> { let len = payload.len(); if mem::size_of::() > MAX_MSG_SIZE { return Err(Error::OversizedMsg); } if len > MAX_MSG_SIZE - mem::size_of::() { return Err(Error::OversizedMsg); } if let Some(fd_arr) = fds { if fd_arr.len() > MAX_ATTACHED_FD_ENTRIES { return Err(Error::IncorrectFds); } } let total = mem::size_of::>() + mem::size_of::() + len; let len = self.send_iovec_all(&[hdr.as_slice(), body.as_slice(), payload], fds)?; if len != total { return Err(Error::PartialMessage); } Ok(()) } /// Reads bytes from the socket into the given scatter/gather vectors. /// /// # Return: /// * - (number of bytes received, buf) on success /// * - SocketRetry: temporary error caused by signals or short of resources. /// * - SocketBroken: the underline socket is broken. /// * - SocketError: other socket related errors. pub fn recv_data(&mut self, len: usize) -> Result<(usize, Vec)> { let mut rbuf = vec![0u8; len]; let mut iovs = [iovec { iov_base: rbuf.as_mut_ptr() as *mut c_void, iov_len: len, }]; // SAFETY: Safe because we own rbuf and it's safe to fill a byte array with arbitrary data. let (bytes, _) = unsafe { self.sock.recv_with_fds(&mut iovs, &mut [])? }; Ok((bytes, rbuf)) } /// Reads bytes from the socket into the given scatter/gather vectors with optional attached /// file. /// /// The underlying communication channel is a Unix domain socket in STREAM mode. It's a little /// tricky to pass file descriptors through such a communication channel. Let's assume that a /// sender sending a message with some file descriptors attached. To successfully receive those /// attached file descriptors, the receiver must obey following rules: /// 1) file descriptors are attached to a message. /// 2) message(packet) boundaries must be respected on the receive side. 
/// In other words, recvmsg() operations must not cross the packet boundary, otherwise the /// attached file descriptors will get lost. /// Note that this function wraps received file descriptors as `File`. /// /// # Return: /// * - (number of bytes received, [received files]) on success /// * - SocketRetry: temporary error caused by signals or short of resources. /// * - SocketBroken: the underline socket is broken. /// * - SocketError: other socket related errors. /// /// # Safety /// /// It is the callers responsibility to ensure it is safe for arbitrary data to be /// written to the iovec pointers. pub unsafe fn recv_into_iovec( &mut self, iovs: &mut [iovec], ) -> Result<(usize, Option>)> { let mut fd_array = vec![0; MAX_ATTACHED_FD_ENTRIES]; let (bytes, fds) = self.sock.recv_with_fds(iovs, &mut fd_array)?; let files = match fds { 0 => None, n => { let files = fd_array .iter() .take(n) .map(|fd| { // Safe because we have the ownership of `fd`. File::from_raw_fd(*fd) }) .collect(); Some(files) } }; Ok((bytes, files)) } /// Reads all bytes from the socket into the given scatter/gather vectors with optional /// attached files. Will loop until all data has been transferred. /// /// The underlying communication channel is a Unix domain socket in STREAM mode. It's a little /// tricky to pass file descriptors through such a communication channel. Let's assume that a /// sender sending a message with some file descriptors attached. To successfully receive those /// attached file descriptors, the receiver must obey following rules: /// 1) file descriptors are attached to a message. /// 2) message(packet) boundaries must be respected on the receive side. /// In other words, recvmsg() operations must not cross the packet boundary, otherwise the /// attached file descriptors will get lost. /// Note that this function wraps received file descriptors as `File`. 
/// /// # Return: /// * - (number of bytes received, [received fds]) on success /// * - SocketBroken: the underline socket is broken. /// * - SocketError: other socket related errors. /// /// # Safety /// /// It is the callers responsibility to ensure it is safe for arbitrary data to be /// written to the iovec pointers. pub unsafe fn recv_into_iovec_all( &mut self, iovs: &mut [iovec], ) -> Result<(usize, Option>)> { let mut data_read = 0; let mut data_total = 0; let mut rfds = None; let iov_lens: Vec = iovs.iter().map(|iov| iov.iov_len).collect(); for len in &iov_lens { data_total += len; } while (data_total - data_read) > 0 { let (nr_skip, offset) = get_sub_iovs_offset(&iov_lens, data_read); let iov = &mut iovs[nr_skip]; let mut data = [ &[iovec { iov_base: (iov.iov_base as usize + offset) as *mut c_void, iov_len: iov.iov_len - offset, }], &iovs[(nr_skip + 1)..], ] .concat(); let res = self.recv_into_iovec(&mut data); match res { Ok((0, _)) => return Ok((data_read, rfds)), Ok((n, fds)) => { if data_read == 0 { rfds = fds; } data_read += n; } Err(e) => match e { Error::SocketRetry(_) => {} _ => return Err(e), }, } } Ok((data_read, rfds)) } /// Reads bytes from the socket into a new buffer with optional attached /// files. Received file descriptors are set close-on-exec and converted to `File`. /// /// # Return: /// * - (number of bytes received, buf, [received files]) on success. /// * - SocketRetry: temporary error caused by signals or short of resources. /// * - SocketBroken: the underline socket is broken. /// * - SocketError: other socket related errors. pub fn recv_into_buf( &mut self, buf_size: usize, ) -> Result<(usize, Vec, Option>)> { let mut buf = vec![0u8; buf_size]; let (bytes, files) = { let mut iovs = [iovec { iov_base: buf.as_mut_ptr() as *mut c_void, iov_len: buf_size, }]; // SAFETY: Safe because we own buf and it's safe to fill a byte array with arbitrary data. unsafe { self.recv_into_iovec(&mut iovs)? 
} }; Ok((bytes, buf, files)) } /// Receive a header-only message with optional attached files. /// Note, only the first MAX_ATTACHED_FD_ENTRIES file descriptors will be /// accepted and all other file descriptor will be discard silently. /// /// # Return: /// * - (message header, [received files]) on success. /// * - SocketRetry: temporary error caused by signals or short of resources. /// * - SocketBroken: the underline socket is broken. /// * - SocketError: other socket related errors. /// * - PartialMessage: received a partial message. /// * - InvalidMessage: received a invalid message. pub fn recv_header(&mut self) -> Result<(VhostUserMsgHeader, Option>)> { let mut hdr = VhostUserMsgHeader::default(); let mut iovs = [iovec { iov_base: (&mut hdr as *mut VhostUserMsgHeader) as *mut c_void, iov_len: mem::size_of::>(), }]; // SAFETY: Safe because we own hdr and it's ByteValued. let (bytes, files) = unsafe { self.recv_into_iovec_all(&mut iovs[..])? }; if bytes == 0 { return Err(Error::Disconnected); } else if bytes != mem::size_of::>() { return Err(Error::PartialMessage); } else if !hdr.is_valid() { return Err(Error::InvalidMessage); } Ok((hdr, files)) } /// Receive a message with optional attached file descriptors. /// Note, only the first MAX_ATTACHED_FD_ENTRIES file descriptors will be /// accepted and all other file descriptor will be discard silently. /// /// # Return: /// * - (message header, message body, [received files]) on success. /// * - SocketRetry: temporary error caused by signals or short of resources. /// * - SocketBroken: the underline socket is broken. /// * - SocketError: other socket related errors. /// * - PartialMessage: received a partial message. /// * - InvalidMessage: received a invalid message. 
pub fn recv_body( &mut self, ) -> Result<(VhostUserMsgHeader, T, Option>)> { let mut hdr = VhostUserMsgHeader::default(); let mut body: T = Default::default(); let mut iovs = [ iovec { iov_base: (&mut hdr as *mut VhostUserMsgHeader) as *mut c_void, iov_len: mem::size_of::>(), }, iovec { iov_base: (&mut body as *mut T) as *mut c_void, iov_len: mem::size_of::(), }, ]; // SAFETY: Safe because we own hdr and body and they're ByteValued. let (bytes, files) = unsafe { self.recv_into_iovec_all(&mut iovs[..])? }; let total = mem::size_of::>() + mem::size_of::(); if bytes != total { return Err(Error::PartialMessage); } else if !hdr.is_valid() || !body.is_valid() { return Err(Error::InvalidMessage); } Ok((hdr, body, files)) } /// Receive a message with header and optional content. Callers need to /// pre-allocate a big enough buffer to receive the message body and /// optional payload. If there are attached file descriptor associated /// with the message, the first MAX_ATTACHED_FD_ENTRIES file descriptors /// will be accepted and all other file descriptor will be discard /// silently. /// /// # Return: /// * - (message header, message size, [received files]) on success. /// * - SocketRetry: temporary error caused by signals or short of resources. /// * - SocketBroken: the underline socket is broken. /// * - SocketError: other socket related errors. /// * - PartialMessage: received a partial message. /// * - InvalidMessage: received a invalid message. pub fn recv_body_into_buf( &mut self, buf: &mut [u8], ) -> Result<(VhostUserMsgHeader, usize, Option>)> { let mut hdr = VhostUserMsgHeader::default(); let mut iovs = [ iovec { iov_base: (&mut hdr as *mut VhostUserMsgHeader) as *mut c_void, iov_len: mem::size_of::>(), }, iovec { iov_base: buf.as_mut_ptr() as *mut c_void, iov_len: buf.len(), }, ]; // SAFETY: Safe because we own hdr and have a mutable borrow of buf, and hdr is ByteValued // and it's safe to fill a byte slice with arbitrary data. 
let (bytes, files) = unsafe { self.recv_into_iovec_all(&mut iovs[..])? }; if bytes < mem::size_of::>() { return Err(Error::PartialMessage); } else if !hdr.is_valid() { return Err(Error::InvalidMessage); } Ok((hdr, bytes - mem::size_of::>(), files)) } /// Receive a message with optional payload and attached file descriptors. /// Note, only the first MAX_ATTACHED_FD_ENTRIES file descriptors will be /// accepted and all other file descriptor will be discard silently. /// /// # Return: /// * - (message header, message body, size of payload, [received files]) on success. /// * - SocketRetry: temporary error caused by signals or short of resources. /// * - SocketBroken: the underline socket is broken. /// * - SocketError: other socket related errors. /// * - PartialMessage: received a partial message. /// * - InvalidMessage: received a invalid message. #[cfg_attr(feature = "cargo-clippy", allow(clippy::type_complexity))] pub fn recv_payload_into_buf( &mut self, buf: &mut [u8], ) -> Result<(VhostUserMsgHeader, T, usize, Option>)> { let mut hdr = VhostUserMsgHeader::default(); let mut body: T = Default::default(); let mut iovs = [ iovec { iov_base: (&mut hdr as *mut VhostUserMsgHeader) as *mut c_void, iov_len: mem::size_of::>(), }, iovec { iov_base: (&mut body as *mut T) as *mut c_void, iov_len: mem::size_of::(), }, iovec { iov_base: buf.as_mut_ptr() as *mut c_void, iov_len: buf.len(), }, ]; // SAFETY: Safe because we own hdr and body and have a mutable borrow of buf, and // hdr and body are ByteValued, and it's safe to fill a byte slice with // arbitrary data. let (bytes, files) = unsafe { self.recv_into_iovec_all(&mut iovs[..])? 
}; let total = mem::size_of::>() + mem::size_of::(); if bytes < total { return Err(Error::PartialMessage); } else if !hdr.is_valid() || !body.is_valid() { return Err(Error::InvalidMessage); } Ok((hdr, body, bytes - total, files)) } } impl AsRawFd for Endpoint { fn as_raw_fd(&self) -> RawFd { self.sock.as_raw_fd() } } // Given a slice of sizes and the `skip_size`, return the offset of `skip_size` in the slice. // For example: // let iov_lens = vec![4, 4, 5]; // let size = 6; // assert_eq!(get_sub_iovs_offset(&iov_len, size), (1, 2)); fn get_sub_iovs_offset(iov_lens: &[usize], skip_size: usize) -> (usize, usize) { let mut size = skip_size; let mut nr_skip = 0; for len in iov_lens { if size >= *len { size -= *len; nr_skip += 1; } else { break; } } (nr_skip, size) } #[cfg(test)] mod tests { use super::*; use std::io::{Read, Seek, SeekFrom, Write}; use vmm_sys_util::rand::rand_alphanumerics; use vmm_sys_util::tempfile::TempFile; fn temp_path() -> PathBuf { PathBuf::from(format!( "/tmp/vhost_test_{}", rand_alphanumerics(8).to_str().unwrap() )) } #[test] fn create_listener() { let path = temp_path(); let listener = Listener::new(path, true).unwrap(); assert!(listener.as_raw_fd() > 0); } #[test] fn create_listener_from_raw_fd() { let path = temp_path(); let file = File::create(path).unwrap(); // SAFETY: Safe because `file` contains a valid fd to a file just created. 
let listener = unsafe { Listener::from_raw_fd(file.as_raw_fd()) }; assert!(listener.as_raw_fd() > 0); } #[test] fn accept_connection() { let path = temp_path(); let listener = Listener::new(path, true).unwrap(); listener.set_nonblocking(true).unwrap(); // accept on a fd without incoming connection let conn = listener.accept().unwrap(); assert!(conn.is_none()); } #[test] fn send_data() { let path = temp_path(); let listener = Listener::new(&path, true).unwrap(); listener.set_nonblocking(true).unwrap(); let mut frontend = Endpoint::::connect(&path).unwrap(); let sock = listener.accept().unwrap().unwrap(); let mut backend = Endpoint::::from_stream(sock); let buf1 = [0x1, 0x2, 0x3, 0x4]; let mut len = frontend.send_slice(&buf1[..], None).unwrap(); assert_eq!(len, 4); let (bytes, buf2, _) = backend.recv_into_buf(0x1000).unwrap(); assert_eq!(bytes, 4); assert_eq!(&buf1[..], &buf2[..bytes]); len = frontend.send_slice(&buf1[..], None).unwrap(); assert_eq!(len, 4); let (bytes, buf2, _) = backend.recv_into_buf(0x2).unwrap(); assert_eq!(bytes, 2); assert_eq!(&buf1[..2], &buf2[..]); let (bytes, buf2, _) = backend.recv_into_buf(0x2).unwrap(); assert_eq!(bytes, 2); assert_eq!(&buf1[2..], &buf2[..]); } #[test] fn send_fd() { let path = temp_path(); let listener = Listener::new(&path, true).unwrap(); listener.set_nonblocking(true).unwrap(); let mut frontend = Endpoint::::connect(&path).unwrap(); let sock = listener.accept().unwrap().unwrap(); let mut backend = Endpoint::::from_stream(sock); let mut fd = TempFile::new().unwrap().into_file(); write!(fd, "test").unwrap(); // Normal case for sending/receiving file descriptors let buf1 = [0x1, 0x2, 0x3, 0x4]; let len = frontend .send_slice(&buf1[..], Some(&[fd.as_raw_fd()])) .unwrap(); assert_eq!(len, 4); let (bytes, buf2, files) = backend.recv_into_buf(4).unwrap(); assert_eq!(bytes, 4); assert_eq!(&buf1[..], &buf2[..]); assert!(files.is_some()); let files = files.unwrap(); { assert_eq!(files.len(), 1); let mut file = &files[0]; let 
mut content = String::new(); file.seek(SeekFrom::Start(0)).unwrap(); file.read_to_string(&mut content).unwrap(); assert_eq!(content, "test"); } // Following communication pattern should work: // Sending side: data(header, body) with fds // Receiving side: data(header) with fds, data(body) let len = frontend .send_slice( &buf1[..], Some(&[fd.as_raw_fd(), fd.as_raw_fd(), fd.as_raw_fd()]), ) .unwrap(); assert_eq!(len, 4); let (bytes, buf2, files) = backend.recv_into_buf(0x2).unwrap(); assert_eq!(bytes, 2); assert_eq!(&buf1[..2], &buf2[..]); assert!(files.is_some()); let files = files.unwrap(); { assert_eq!(files.len(), 3); let mut file = &files[1]; let mut content = String::new(); file.seek(SeekFrom::Start(0)).unwrap(); file.read_to_string(&mut content).unwrap(); assert_eq!(content, "test"); } let (bytes, buf2, files) = backend.recv_into_buf(0x2).unwrap(); assert_eq!(bytes, 2); assert_eq!(&buf1[2..], &buf2[..]); assert!(files.is_none()); // Following communication pattern should not work: // Sending side: data(header, body) with fds // Receiving side: data(header), data(body) with fds let len = frontend .send_slice( &buf1[..], Some(&[fd.as_raw_fd(), fd.as_raw_fd(), fd.as_raw_fd()]), ) .unwrap(); assert_eq!(len, 4); let (bytes, buf4) = backend.recv_data(2).unwrap(); assert_eq!(bytes, 2); assert_eq!(&buf1[..2], &buf4[..]); let (bytes, buf2, files) = backend.recv_into_buf(0x2).unwrap(); assert_eq!(bytes, 2); assert_eq!(&buf1[2..], &buf2[..]); assert!(files.is_none()); // Following communication pattern should work: // Sending side: data, data with fds // Receiving side: data, data with fds let len = frontend.send_slice(&buf1[..], None).unwrap(); assert_eq!(len, 4); let len = frontend .send_slice( &buf1[..], Some(&[fd.as_raw_fd(), fd.as_raw_fd(), fd.as_raw_fd()]), ) .unwrap(); assert_eq!(len, 4); let (bytes, buf2, files) = backend.recv_into_buf(0x4).unwrap(); assert_eq!(bytes, 4); assert_eq!(&buf1[..], &buf2[..]); assert!(files.is_none()); let (bytes, buf2, files) = 
backend.recv_into_buf(0x2).unwrap(); assert_eq!(bytes, 2); assert_eq!(&buf1[..2], &buf2[..]); assert!(files.is_some()); let files = files.unwrap(); { assert_eq!(files.len(), 3); let mut file = &files[1]; let mut content = String::new(); file.seek(SeekFrom::Start(0)).unwrap(); file.read_to_string(&mut content).unwrap(); assert_eq!(content, "test"); } let (bytes, buf2, files) = backend.recv_into_buf(0x2).unwrap(); assert_eq!(bytes, 2); assert_eq!(&buf1[2..], &buf2[..]); assert!(files.is_none()); // Following communication pattern should not work: // Sending side: data1, data2 with fds // Receiving side: data + partial of data2, left of data2 with fds let len = frontend.send_slice(&buf1[..], None).unwrap(); assert_eq!(len, 4); let len = frontend .send_slice( &buf1[..], Some(&[fd.as_raw_fd(), fd.as_raw_fd(), fd.as_raw_fd()]), ) .unwrap(); assert_eq!(len, 4); let (bytes, _) = backend.recv_data(5).unwrap(); assert_eq!(bytes, 5); let (bytes, _, files) = backend.recv_into_buf(0x4).unwrap(); assert_eq!(bytes, 3); assert!(files.is_none()); // If the target fd array is too small, extra file descriptors will get lost. let len = frontend .send_slice( &buf1[..], Some(&[fd.as_raw_fd(), fd.as_raw_fd(), fd.as_raw_fd()]), ) .unwrap(); assert_eq!(len, 4); let (bytes, _, files) = backend.recv_into_buf(0x4).unwrap(); assert_eq!(bytes, 4); assert!(files.is_some()); } #[test] fn send_recv() { let path = temp_path(); let listener = Listener::new(&path, true).unwrap(); listener.set_nonblocking(true).unwrap(); let mut frontend = Endpoint::::connect(&path).unwrap(); let sock = listener.accept().unwrap().unwrap(); let mut backend = Endpoint::::from_stream(sock); let mut hdr1 = VhostUserMsgHeader::new(FrontendReq::GET_FEATURES, 0, mem::size_of::() as u32); hdr1.set_need_reply(true); let features1 = 0x1u64; frontend.send_message(&hdr1, &features1, None).unwrap(); let mut features2 = 0u64; // SAFETY: Safe because features2 is valid and it's an `u64`. 
let slice = unsafe { slice::from_raw_parts_mut( (&mut features2 as *mut u64) as *mut u8, mem::size_of::(), ) }; let (hdr2, bytes, files) = backend.recv_body_into_buf(slice).unwrap(); assert_eq!(hdr1, hdr2); assert_eq!(bytes, 8); assert_eq!(features1, features2); assert!(files.is_none()); frontend.send_header(&hdr1, None).unwrap(); let (hdr2, files) = backend.recv_header().unwrap(); assert_eq!(hdr1, hdr2); assert!(files.is_none()); } #[test] fn partial_message() { let path = temp_path(); let listener = Listener::new(&path, true).unwrap(); let mut frontend = UnixStream::connect(&path).unwrap(); let sock = listener.accept().unwrap().unwrap(); let mut backend = Endpoint::::from_stream(sock); write!(frontend, "a").unwrap(); drop(frontend); assert!(matches!(backend.recv_header(), Err(Error::PartialMessage))); } #[test] fn disconnected() { let path = temp_path(); let listener = Listener::new(&path, true).unwrap(); let _ = UnixStream::connect(&path).unwrap(); let sock = listener.accept().unwrap().unwrap(); let mut backend = Endpoint::::from_stream(sock); assert!(matches!(backend.recv_header(), Err(Error::Disconnected))); } } vhost-0.10.0/src/vhost_user/dummy_backend.rs000064400000000000000000000231701046102023000172100ustar 00000000000000// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved. 
// SPDX-License-Identifier: Apache-2.0 use std::fs::File; use super::message::*; use super::*; pub const MAX_QUEUE_NUM: usize = 2; pub const MAX_VRING_NUM: usize = 256; pub const MAX_MEM_SLOTS: usize = 32; pub const VIRTIO_FEATURES: u64 = 0x40000003; #[derive(Default)] pub struct DummyBackendReqHandler { pub owned: bool, pub features_acked: bool, pub acked_features: u64, pub acked_protocol_features: u64, pub queue_num: usize, pub vring_num: [u32; MAX_QUEUE_NUM], pub vring_base: [u32; MAX_QUEUE_NUM], pub call_fd: [Option; MAX_QUEUE_NUM], pub kick_fd: [Option; MAX_QUEUE_NUM], pub err_fd: [Option; MAX_QUEUE_NUM], pub vring_started: [bool; MAX_QUEUE_NUM], pub vring_enabled: [bool; MAX_QUEUE_NUM], pub inflight_file: Option, } impl DummyBackendReqHandler { pub fn new() -> Self { DummyBackendReqHandler { queue_num: MAX_QUEUE_NUM, ..Default::default() } } /// Helper to check if VirtioFeature enabled fn check_feature(&self, feat: VhostUserVirtioFeatures) -> Result<()> { if self.acked_features & feat.bits() != 0 { Ok(()) } else { Err(Error::InactiveFeature(feat)) } } /// Helper to check is VhostUserProtocolFeatures enabled fn check_proto_feature(&self, feat: VhostUserProtocolFeatures) -> Result<()> { if self.acked_protocol_features & feat.bits() != 0 { Ok(()) } else { Err(Error::InactiveOperation(feat)) } } } impl VhostUserBackendReqHandlerMut for DummyBackendReqHandler { fn set_owner(&mut self) -> Result<()> { if self.owned { return Err(Error::InvalidOperation("already claimed")); } self.owned = true; Ok(()) } fn reset_owner(&mut self) -> Result<()> { self.owned = false; self.features_acked = false; self.acked_features = 0; self.acked_protocol_features = 0; Ok(()) } fn get_features(&mut self) -> Result { Ok(VIRTIO_FEATURES) } fn set_features(&mut self, features: u64) -> Result<()> { if !self.owned { return Err(Error::InvalidOperation("not owned")); } else if self.features_acked { return Err(Error::InvalidOperation("features already set")); } else if (features & 
!VIRTIO_FEATURES) != 0 { return Err(Error::InvalidParam); } self.acked_features = features; self.features_acked = true; // If VHOST_USER_F_PROTOCOL_FEATURES has not been negotiated, // the ring is initialized in an enabled state. // If VHOST_USER_F_PROTOCOL_FEATURES has been negotiated, // the ring is initialized in a disabled state. Client must not // pass data to/from the backend until ring is enabled by // VHOST_USER_SET_VRING_ENABLE with parameter 1, or after it has // been disabled by VHOST_USER_SET_VRING_ENABLE with parameter 0. let vring_enabled = self.acked_features & VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits() == 0; for enabled in &mut self.vring_enabled { *enabled = vring_enabled; } Ok(()) } fn set_mem_table(&mut self, _ctx: &[VhostUserMemoryRegion], _files: Vec) -> Result<()> { Ok(()) } fn set_vring_num(&mut self, index: u32, num: u32) -> Result<()> { if index as usize >= self.queue_num || num == 0 || num as usize > MAX_VRING_NUM { return Err(Error::InvalidParam); } self.vring_num[index as usize] = num; Ok(()) } fn set_vring_addr( &mut self, index: u32, _flags: VhostUserVringAddrFlags, _descriptor: u64, _used: u64, _available: u64, _log: u64, ) -> Result<()> { if index as usize >= self.queue_num { return Err(Error::InvalidParam); } Ok(()) } fn set_vring_base(&mut self, index: u32, base: u32) -> Result<()> { if index as usize >= self.queue_num || base as usize >= MAX_VRING_NUM { return Err(Error::InvalidParam); } self.vring_base[index as usize] = base; Ok(()) } fn get_vring_base(&mut self, index: u32) -> Result { if index as usize >= self.queue_num { return Err(Error::InvalidParam); } // Quotation from vhost-user spec: // Client must start ring upon receiving a kick (that is, detecting // that file descriptor is readable) on the descriptor specified by // VHOST_USER_SET_VRING_KICK, and stop ring upon receiving // VHOST_USER_GET_VRING_BASE. 
self.vring_started[index as usize] = false; Ok(VhostUserVringState::new( index, self.vring_base[index as usize], )) } fn set_vring_kick(&mut self, index: u8, fd: Option) -> Result<()> { if index as usize >= self.queue_num || index as usize > self.queue_num { return Err(Error::InvalidParam); } self.kick_fd[index as usize] = fd; // Quotation from vhost-user spec: // Client must start ring upon receiving a kick (that is, detecting // that file descriptor is readable) on the descriptor specified by // VHOST_USER_SET_VRING_KICK, and stop ring upon receiving // VHOST_USER_GET_VRING_BASE. // // So we should add fd to event monitor(select, poll, epoll) here. self.vring_started[index as usize] = true; Ok(()) } fn set_vring_call(&mut self, index: u8, fd: Option) -> Result<()> { if index as usize >= self.queue_num || index as usize > self.queue_num { return Err(Error::InvalidParam); } self.call_fd[index as usize] = fd; Ok(()) } fn set_vring_err(&mut self, index: u8, fd: Option) -> Result<()> { if index as usize >= self.queue_num || index as usize > self.queue_num { return Err(Error::InvalidParam); } self.err_fd[index as usize] = fd; Ok(()) } fn get_protocol_features(&mut self) -> Result { Ok(VhostUserProtocolFeatures::all()) } fn set_protocol_features(&mut self, features: u64) -> Result<()> { // Note: backend that reported VHOST_USER_F_PROTOCOL_FEATURES must // support this message even before VHOST_USER_SET_FEATURES was // called. // What happens if the frontend calls set_features() with // VHOST_USER_F_PROTOCOL_FEATURES cleared after calling this // interface? self.acked_protocol_features = features; Ok(()) } fn get_queue_num(&mut self) -> Result { Ok(MAX_QUEUE_NUM as u64) } fn set_vring_enable(&mut self, index: u32, enable: bool) -> Result<()> { // This request should be handled only when VHOST_USER_F_PROTOCOL_FEATURES // has been negotiated. 
self.check_feature(VhostUserVirtioFeatures::PROTOCOL_FEATURES)?; if index as usize >= self.queue_num || index as usize > self.queue_num { return Err(Error::InvalidParam); } // Backend must not pass data to/from the backend until ring is // enabled by VHOST_USER_SET_VRING_ENABLE with parameter 1, // or after it has been disabled by VHOST_USER_SET_VRING_ENABLE // with parameter 0. self.vring_enabled[index as usize] = enable; Ok(()) } fn get_config( &mut self, offset: u32, size: u32, _flags: VhostUserConfigFlags, ) -> Result> { self.check_proto_feature(VhostUserProtocolFeatures::CONFIG)?; if !(VHOST_USER_CONFIG_OFFSET..VHOST_USER_CONFIG_SIZE).contains(&offset) || size > VHOST_USER_CONFIG_SIZE - VHOST_USER_CONFIG_OFFSET || size + offset > VHOST_USER_CONFIG_SIZE { return Err(Error::InvalidParam); } assert_eq!(offset, 0x100); assert_eq!(size, 4); Ok(vec![0xa5; size as usize]) } fn set_config(&mut self, offset: u32, buf: &[u8], _flags: VhostUserConfigFlags) -> Result<()> { let size = buf.len() as u32; self.check_proto_feature(VhostUserProtocolFeatures::CONFIG)?; if !(VHOST_USER_CONFIG_OFFSET..VHOST_USER_CONFIG_SIZE).contains(&offset) || size > VHOST_USER_CONFIG_SIZE - VHOST_USER_CONFIG_OFFSET || size + offset > VHOST_USER_CONFIG_SIZE { return Err(Error::InvalidParam); } assert_eq!(offset, 0x100); assert_eq!(buf.len(), 4); assert_eq!(buf, &[0xa5; 4]); Ok(()) } fn get_inflight_fd( &mut self, inflight: &VhostUserInflight, ) -> Result<(VhostUserInflight, File)> { let file = tempfile::tempfile().unwrap(); self.inflight_file = Some(file.try_clone().unwrap()); Ok(( VhostUserInflight { mmap_size: 0x1000, mmap_offset: 0, num_queues: inflight.num_queues, queue_size: inflight.queue_size, }, file, )) } fn set_inflight_fd(&mut self, _inflight: &VhostUserInflight, _file: File) -> Result<()> { Ok(()) } fn get_max_mem_slots(&mut self) -> Result { Ok(MAX_MEM_SLOTS as u64) } fn add_mem_region(&mut self, _region: &VhostUserSingleMemoryRegion, _fd: File) -> Result<()> { Ok(()) } fn 
remove_mem_region(&mut self, _region: &VhostUserSingleMemoryRegion) -> Result<()> { Ok(()) } } vhost-0.10.0/src/vhost_user/frontend.rs000064400000000000000000001231461046102023000162310ustar 00000000000000// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved. // SPDX-License-Identifier: Apache-2.0 //! Traits and Struct for vhost-user frontend. use std::fs::File; use std::mem; use std::os::unix::io::{AsRawFd, RawFd}; use std::os::unix::net::UnixStream; use std::path::Path; use std::sync::{Arc, Mutex, MutexGuard}; use vm_memory::ByteValued; use vmm_sys_util::eventfd::EventFd; use super::connection::Endpoint; use super::message::*; use super::{take_single_file, Error as VhostUserError, Result as VhostUserResult}; use crate::backend::{ VhostBackend, VhostUserDirtyLogRegion, VhostUserMemoryRegionInfo, VringConfigData, }; use crate::{Error, Result}; /// Trait for vhost-user frontend to provide extra methods not covered by the VhostBackend yet. pub trait VhostUserFrontend: VhostBackend { /// Get the protocol feature bitmask from the underlying vhost implementation. fn get_protocol_features(&mut self) -> Result; /// Enable protocol features in the underlying vhost implementation. fn set_protocol_features(&mut self, features: VhostUserProtocolFeatures) -> Result<()>; /// Query how many queues the backend supports. fn get_queue_num(&mut self) -> Result; /// Signal backend to enable or disable corresponding vring. /// /// Backend must not pass data to/from the backend until ring is enabled by /// VHOST_USER_SET_VRING_ENABLE with parameter 1, or after it has been /// disabled by VHOST_USER_SET_VRING_ENABLE with parameter 0. fn set_vring_enable(&mut self, queue_index: usize, enable: bool) -> Result<()>; /// Fetch the contents of the virtio device configuration space. fn get_config( &mut self, offset: u32, size: u32, flags: VhostUserConfigFlags, buf: &[u8], ) -> Result<(VhostUserConfig, VhostUserConfigPayload)>; /// Change the virtio device configuration space. 
It also can be used for live migration on the /// destination host to set readonly configuration space fields. fn set_config(&mut self, offset: u32, flags: VhostUserConfigFlags, buf: &[u8]) -> Result<()>; /// Setup backend communication channel. fn set_backend_request_fd(&mut self, fd: &dyn AsRawFd) -> Result<()>; /// Retrieve shared buffer for inflight I/O tracking. fn get_inflight_fd( &mut self, inflight: &VhostUserInflight, ) -> Result<(VhostUserInflight, File)>; /// Set shared buffer for inflight I/O tracking. fn set_inflight_fd(&mut self, inflight: &VhostUserInflight, fd: RawFd) -> Result<()>; /// Query the maximum amount of memory slots supported by the backend. fn get_max_mem_slots(&mut self) -> Result; /// Add a new guest memory mapping for vhost to use. fn add_mem_region(&mut self, region: &VhostUserMemoryRegionInfo) -> Result<()>; /// Remove a guest memory mapping from vhost. fn remove_mem_region(&mut self, region: &VhostUserMemoryRegionInfo) -> Result<()>; } fn error_code(err: VhostUserError) -> Result { Err(Error::VhostUserProtocol(err)) } /// Struct for the vhost-user frontend endpoint. #[derive(Clone)] pub struct Frontend { node: Arc>, } impl Frontend { /// Create a new instance. fn new(ep: Endpoint, max_queue_num: u64) -> Self { Frontend { node: Arc::new(Mutex::new(FrontendInternal { main_sock: ep, virtio_features: 0, acked_virtio_features: 0, protocol_features: 0, acked_protocol_features: 0, protocol_features_ready: false, max_queue_num, error: None, hdr_flags: VhostUserHeaderFlag::empty(), })), } } fn node(&self) -> MutexGuard { self.node.lock().unwrap() } /// Create a new instance from a Unix stream socket. pub fn from_stream(sock: UnixStream, max_queue_num: u64) -> Self { Self::new(Endpoint::::from_stream(sock), max_queue_num) } /// Create a new vhost-user frontend endpoint. /// /// Will retry as the backend may not be ready to accept the connection. 
/// /// # Arguments /// * `path` - path of Unix domain socket listener to connect to pub fn connect>(path: P, max_queue_num: u64) -> Result { let mut retry_count = 5; let endpoint = loop { match Endpoint::::connect(&path) { Ok(endpoint) => break Ok(endpoint), Err(e) => match &e { VhostUserError::SocketConnect(why) => { if why.kind() == std::io::ErrorKind::ConnectionRefused && retry_count > 0 { std::thread::sleep(std::time::Duration::from_millis(100)); retry_count -= 1; continue; } else { break Err(e); } } _ => break Err(e), }, } }?; Ok(Self::new(endpoint, max_queue_num)) } /// Set the header flags that should be applied to all following messages. pub fn set_hdr_flags(&self, flags: VhostUserHeaderFlag) { let mut node = self.node(); node.hdr_flags = flags; } } impl VhostBackend for Frontend { /// Get from the underlying vhost implementation the feature bitmask. fn get_features(&self) -> Result { let mut node = self.node(); let hdr = node.send_request_header(FrontendReq::GET_FEATURES, None)?; let val = node.recv_reply::(&hdr)?; node.virtio_features = val.value; Ok(node.virtio_features) } /// Enable features in the underlying vhost implementation using a bitmask. fn set_features(&self, features: u64) -> Result<()> { let mut node = self.node(); let val = VhostUserU64::new(features); let hdr = node.send_request_with_body(FrontendReq::SET_FEATURES, &val, None)?; node.acked_virtio_features = features & node.virtio_features; node.wait_for_ack(&hdr).map_err(|e| e.into()) } /// Set the current Frontend as an owner of the session. fn set_owner(&self) -> Result<()> { // We unwrap() the return value to assert that we are not expecting threads to ever fail // while holding the lock. 
let mut node = self.node(); let hdr = node.send_request_header(FrontendReq::SET_OWNER, None)?; node.wait_for_ack(&hdr).map_err(|e| e.into()) } fn reset_owner(&self) -> Result<()> { let mut node = self.node(); let hdr = node.send_request_header(FrontendReq::RESET_OWNER, None)?; node.wait_for_ack(&hdr).map_err(|e| e.into()) } /// Set the memory map regions on the backend so it can translate the vring /// addresses. In the ancillary data there is an array of file descriptors fn set_mem_table(&self, regions: &[VhostUserMemoryRegionInfo]) -> Result<()> { if regions.is_empty() || regions.len() > MAX_ATTACHED_FD_ENTRIES { return error_code(VhostUserError::InvalidParam); } let mut ctx = VhostUserMemoryContext::new(); for region in regions.iter() { if region.memory_size == 0 || region.mmap_handle < 0 { return error_code(VhostUserError::InvalidParam); } ctx.append(®ion.to_region(), region.mmap_handle); } let mut node = self.node(); let body = VhostUserMemory::new(ctx.regions.len() as u32); // SAFETY: Safe because ctx.regions is a valid Vec() at this point. 
let (_, payload, _) = unsafe { ctx.regions.align_to::() }; let hdr = node.send_request_with_payload( FrontendReq::SET_MEM_TABLE, &body, payload, Some(ctx.fds.as_slice()), )?; node.wait_for_ack(&hdr).map_err(|e| e.into()) } // Clippy doesn't seem to know that if let with && is still experimental #[allow(clippy::unnecessary_unwrap)] fn set_log_base(&self, base: u64, region: Option) -> Result<()> { let mut node = self.node(); let val = VhostUserU64::new(base); if node.acked_protocol_features & VhostUserProtocolFeatures::LOG_SHMFD.bits() != 0 && region.is_some() { let region = region.unwrap(); let log = VhostUserLog { mmap_size: region.mmap_size, mmap_offset: region.mmap_offset, }; let hdr = node.send_request_with_body( FrontendReq::SET_LOG_BASE, &log, Some(&[region.mmap_handle]), )?; node.wait_for_ack(&hdr).map_err(|e| e.into()) } else { let _ = node.send_request_with_body(FrontendReq::SET_LOG_BASE, &val, None)?; Ok(()) } } fn set_log_fd(&self, fd: RawFd) -> Result<()> { let mut node = self.node(); let fds = [fd]; let hdr = node.send_request_header(FrontendReq::SET_LOG_FD, Some(&fds))?; node.wait_for_ack(&hdr).map_err(|e| e.into()) } /// Set the size of the queue. fn set_vring_num(&self, queue_index: usize, num: u16) -> Result<()> { let mut node = self.node(); if queue_index as u64 >= node.max_queue_num { return error_code(VhostUserError::InvalidParam); } let val = VhostUserVringState::new(queue_index as u32, num.into()); let hdr = node.send_request_with_body(FrontendReq::SET_VRING_NUM, &val, None)?; node.wait_for_ack(&hdr).map_err(|e| e.into()) } /// Sets the addresses of the different aspects of the vring. 
fn set_vring_addr(&self, queue_index: usize, config_data: &VringConfigData) -> Result<()> { let mut node = self.node(); if queue_index as u64 >= node.max_queue_num || config_data.flags & !(VhostUserVringAddrFlags::all().bits()) != 0 { return error_code(VhostUserError::InvalidParam); } let val = VhostUserVringAddr::from_config_data(queue_index as u32, config_data); let hdr = node.send_request_with_body(FrontendReq::SET_VRING_ADDR, &val, None)?; node.wait_for_ack(&hdr).map_err(|e| e.into()) } /// Sets the base offset in the available vring. fn set_vring_base(&self, queue_index: usize, base: u16) -> Result<()> { let mut node = self.node(); if queue_index as u64 >= node.max_queue_num { return error_code(VhostUserError::InvalidParam); } let val = VhostUserVringState::new(queue_index as u32, base.into()); let hdr = node.send_request_with_body(FrontendReq::SET_VRING_BASE, &val, None)?; node.wait_for_ack(&hdr).map_err(|e| e.into()) } fn get_vring_base(&self, queue_index: usize) -> Result { let mut node = self.node(); if queue_index as u64 >= node.max_queue_num { return error_code(VhostUserError::InvalidParam); } let req = VhostUserVringState::new(queue_index as u32, 0); let hdr = node.send_request_with_body(FrontendReq::GET_VRING_BASE, &req, None)?; let reply = node.recv_reply::(&hdr)?; Ok(reply.num) } /// Set the event file descriptor to signal when buffers are used. /// Bits (0-7) of the payload contain the vring index. Bit 8 is the invalid FD flag. This flag /// is set when there is no file descriptor in the ancillary data. This signals that polling /// will be used instead of waiting for the call. 
fn set_vring_call(&self, queue_index: usize, fd: &EventFd) -> Result<()> { let mut node = self.node(); if queue_index as u64 >= node.max_queue_num { return error_code(VhostUserError::InvalidParam); } let hdr = node.send_fd_for_vring(FrontendReq::SET_VRING_CALL, queue_index, fd.as_raw_fd())?; node.wait_for_ack(&hdr).map_err(|e| e.into()) } /// Set the event file descriptor for adding buffers to the vring. /// Bits (0-7) of the payload contain the vring index. Bit 8 is the invalid FD flag. This flag /// is set when there is no file descriptor in the ancillary data. This signals that polling /// should be used instead of waiting for a kick. fn set_vring_kick(&self, queue_index: usize, fd: &EventFd) -> Result<()> { let mut node = self.node(); if queue_index as u64 >= node.max_queue_num { return error_code(VhostUserError::InvalidParam); } let hdr = node.send_fd_for_vring(FrontendReq::SET_VRING_KICK, queue_index, fd.as_raw_fd())?; node.wait_for_ack(&hdr).map_err(|e| e.into()) } /// Set the event file descriptor to signal when error occurs. /// Bits (0-7) of the payload contain the vring index. Bit 8 is the invalid FD flag. This flag /// is set when there is no file descriptor in the ancillary data. fn set_vring_err(&self, queue_index: usize, fd: &EventFd) -> Result<()> { let mut node = self.node(); if queue_index as u64 >= node.max_queue_num { return error_code(VhostUserError::InvalidParam); } let hdr = node.send_fd_for_vring(FrontendReq::SET_VRING_ERR, queue_index, fd.as_raw_fd())?; node.wait_for_ack(&hdr).map_err(|e| e.into()) } } impl VhostUserFrontend for Frontend { fn get_protocol_features(&mut self) -> Result { let mut node = self.node(); node.check_feature(VhostUserVirtioFeatures::PROTOCOL_FEATURES)?; let hdr = node.send_request_header(FrontendReq::GET_PROTOCOL_FEATURES, None)?; let val = node.recv_reply::(&hdr)?; node.protocol_features = val.value; // Should we support forward compatibility? // If so just mask out unrecognized flags instead of return errors. 
match VhostUserProtocolFeatures::from_bits(node.protocol_features) { Some(val) => Ok(val), None => error_code(VhostUserError::InvalidMessage), } } fn set_protocol_features(&mut self, features: VhostUserProtocolFeatures) -> Result<()> { let mut node = self.node(); node.check_feature(VhostUserVirtioFeatures::PROTOCOL_FEATURES)?; let val = VhostUserU64::new(features.bits()); let hdr = node.send_request_with_body(FrontendReq::SET_PROTOCOL_FEATURES, &val, None)?; // Don't wait for ACK here because the protocol feature negotiation process hasn't been // completed yet. node.acked_protocol_features = features.bits(); node.protocol_features_ready = true; node.wait_for_ack(&hdr).map_err(|e| e.into()) } fn get_queue_num(&mut self) -> Result { let mut node = self.node(); node.check_proto_feature(VhostUserProtocolFeatures::MQ)?; let hdr = node.send_request_header(FrontendReq::GET_QUEUE_NUM, None)?; let val = node.recv_reply::(&hdr)?; if val.value > VHOST_USER_MAX_VRINGS { return error_code(VhostUserError::InvalidMessage); } node.max_queue_num = val.value; Ok(node.max_queue_num) } fn set_vring_enable(&mut self, queue_index: usize, enable: bool) -> Result<()> { let mut node = self.node(); // set_vring_enable() is supported only when PROTOCOL_FEATURES has been enabled. 
if node.acked_virtio_features & VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits() == 0 { return error_code(VhostUserError::InactiveFeature( VhostUserVirtioFeatures::PROTOCOL_FEATURES, )); } else if queue_index as u64 >= node.max_queue_num { return error_code(VhostUserError::InvalidParam); } let flag = enable.into(); let val = VhostUserVringState::new(queue_index as u32, flag); let hdr = node.send_request_with_body(FrontendReq::SET_VRING_ENABLE, &val, None)?; node.wait_for_ack(&hdr).map_err(|e| e.into()) } fn get_config( &mut self, offset: u32, size: u32, flags: VhostUserConfigFlags, buf: &[u8], ) -> Result<(VhostUserConfig, VhostUserConfigPayload)> { let body = VhostUserConfig::new(offset, size, flags); if !body.is_valid() { return error_code(VhostUserError::InvalidParam); } let mut node = self.node(); // depends on VhostUserProtocolFeatures::CONFIG node.check_proto_feature(VhostUserProtocolFeatures::CONFIG)?; // vhost-user spec states that: // "Frontend payload: virtio device config space" // "Backend payload: virtio device config space" let hdr = node.send_request_with_payload(FrontendReq::GET_CONFIG, &body, buf, None)?; let (body_reply, buf_reply, rfds) = node.recv_reply_with_payload::(&hdr)?; if rfds.is_some() { return error_code(VhostUserError::InvalidMessage); } else if body_reply.size == 0 { return error_code(VhostUserError::BackendInternalError); } else if body_reply.size != body.size || body_reply.size as usize != buf.len() || body_reply.offset != body.offset { return error_code(VhostUserError::InvalidMessage); } Ok((body_reply, buf_reply)) } fn set_config(&mut self, offset: u32, flags: VhostUserConfigFlags, buf: &[u8]) -> Result<()> { if buf.len() > MAX_MSG_SIZE { return error_code(VhostUserError::InvalidParam); } let body = VhostUserConfig::new(offset, buf.len() as u32, flags); if !body.is_valid() { return error_code(VhostUserError::InvalidParam); } let mut node = self.node(); // depends on VhostUserProtocolFeatures::CONFIG 
node.check_proto_feature(VhostUserProtocolFeatures::CONFIG)?; let hdr = node.send_request_with_payload(FrontendReq::SET_CONFIG, &body, buf, None)?; node.wait_for_ack(&hdr).map_err(|e| e.into()) } fn set_backend_request_fd(&mut self, fd: &dyn AsRawFd) -> Result<()> { let mut node = self.node(); node.check_proto_feature(VhostUserProtocolFeatures::BACKEND_REQ)?; let fds = [fd.as_raw_fd()]; let hdr = node.send_request_header(FrontendReq::SET_BACKEND_REQ_FD, Some(&fds))?; node.wait_for_ack(&hdr).map_err(|e| e.into()) } fn get_inflight_fd( &mut self, inflight: &VhostUserInflight, ) -> Result<(VhostUserInflight, File)> { let mut node = self.node(); node.check_proto_feature(VhostUserProtocolFeatures::INFLIGHT_SHMFD)?; let hdr = node.send_request_with_body(FrontendReq::GET_INFLIGHT_FD, inflight, None)?; let (inflight, files) = node.recv_reply_with_files::(&hdr)?; match take_single_file(files) { Some(file) => Ok((inflight, file)), None => error_code(VhostUserError::IncorrectFds), } } fn set_inflight_fd(&mut self, inflight: &VhostUserInflight, fd: RawFd) -> Result<()> { let mut node = self.node(); node.check_proto_feature(VhostUserProtocolFeatures::INFLIGHT_SHMFD)?; if inflight.mmap_size == 0 || inflight.num_queues == 0 || inflight.queue_size == 0 || fd < 0 { return error_code(VhostUserError::InvalidParam); } let hdr = node.send_request_with_body(FrontendReq::SET_INFLIGHT_FD, inflight, Some(&[fd]))?; node.wait_for_ack(&hdr).map_err(|e| e.into()) } fn get_max_mem_slots(&mut self) -> Result { let mut node = self.node(); node.check_proto_feature(VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS)?; let hdr = node.send_request_header(FrontendReq::GET_MAX_MEM_SLOTS, None)?; let val = node.recv_reply::(&hdr)?; Ok(val.value) } fn add_mem_region(&mut self, region: &VhostUserMemoryRegionInfo) -> Result<()> { let mut node = self.node(); node.check_proto_feature(VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS)?; if region.memory_size == 0 || region.mmap_handle < 0 { return 
error_code(VhostUserError::InvalidParam); } let body = region.to_single_region(); let fds = [region.mmap_handle]; let hdr = node.send_request_with_body(FrontendReq::ADD_MEM_REG, &body, Some(&fds))?; node.wait_for_ack(&hdr).map_err(|e| e.into()) } fn remove_mem_region(&mut self, region: &VhostUserMemoryRegionInfo) -> Result<()> { let mut node = self.node(); node.check_proto_feature(VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS)?; if region.memory_size == 0 { return error_code(VhostUserError::InvalidParam); } let body = region.to_single_region(); let hdr = node.send_request_with_body(FrontendReq::REM_MEM_REG, &body, None)?; node.wait_for_ack(&hdr).map_err(|e| e.into()) } } impl AsRawFd for Frontend { fn as_raw_fd(&self) -> RawFd { let node = self.node(); node.main_sock.as_raw_fd() } } /// Context object to pass guest memory configuration to VhostUserFrontend::set_mem_table(). struct VhostUserMemoryContext { regions: VhostUserMemoryPayload, fds: Vec, } impl VhostUserMemoryContext { /// Create a context object. pub fn new() -> Self { VhostUserMemoryContext { regions: VhostUserMemoryPayload::new(), fds: Vec::new(), } } /// Append a user memory region and corresponding RawFd into the context object. pub fn append(&mut self, region: &VhostUserMemoryRegion, fd: RawFd) { self.regions.push(*region); self.fds.push(fd); } } struct FrontendInternal { // Used to send requests to the backend. main_sock: Endpoint, // Cached virtio features from the backend. virtio_features: u64, // Cached acked virtio features from the driver. acked_virtio_features: u64, // Cached vhost-user protocol features from the backend. protocol_features: u64, // Cached vhost-user protocol features. acked_protocol_features: u64, // Cached vhost-user protocol features are ready to use. protocol_features_ready: bool, // Cached maxinum number of queues supported from the backend. max_queue_num: u64, // Internal flag to mark failure state. error: Option, // List of header flags. 
hdr_flags: VhostUserHeaderFlag, } impl FrontendInternal { fn send_request_header( &mut self, code: FrontendReq, fds: Option<&[RawFd]>, ) -> VhostUserResult> { self.check_state()?; let hdr = self.new_request_header(code, 0); self.main_sock.send_header(&hdr, fds)?; Ok(hdr) } fn send_request_with_body( &mut self, code: FrontendReq, msg: &T, fds: Option<&[RawFd]>, ) -> VhostUserResult> { if mem::size_of::() > MAX_MSG_SIZE { return Err(VhostUserError::InvalidParam); } self.check_state()?; let hdr = self.new_request_header(code, mem::size_of::() as u32); self.main_sock.send_message(&hdr, msg, fds)?; Ok(hdr) } fn send_request_with_payload( &mut self, code: FrontendReq, msg: &T, payload: &[u8], fds: Option<&[RawFd]>, ) -> VhostUserResult> { let len = mem::size_of::() + payload.len(); if len > MAX_MSG_SIZE { return Err(VhostUserError::InvalidParam); } if let Some(fd_arr) = fds { if fd_arr.len() > MAX_ATTACHED_FD_ENTRIES { return Err(VhostUserError::InvalidParam); } } self.check_state()?; let hdr = self.new_request_header(code, len as u32); self.main_sock .send_message_with_payload(&hdr, msg, payload, fds)?; Ok(hdr) } fn send_fd_for_vring( &mut self, code: FrontendReq, queue_index: usize, fd: RawFd, ) -> VhostUserResult> { if queue_index as u64 >= self.max_queue_num { return Err(VhostUserError::InvalidParam); } self.check_state()?; // Bits (0-7) of the payload contain the vring index. Bit 8 is the invalid FD flag. // This flag is set when there is no file descriptor in the ancillary data. This signals // that polling will be used instead of waiting for the call. 
let msg = VhostUserU64::new(queue_index as u64); let hdr = self.new_request_header(code, mem::size_of::() as u32); self.main_sock.send_message(&hdr, &msg, Some(&[fd]))?; Ok(hdr) } fn recv_reply( &mut self, hdr: &VhostUserMsgHeader, ) -> VhostUserResult { if mem::size_of::() > MAX_MSG_SIZE || hdr.is_reply() { return Err(VhostUserError::InvalidParam); } self.check_state()?; let (reply, body, rfds) = self.main_sock.recv_body::()?; if !reply.is_reply_for(hdr) || rfds.is_some() || !body.is_valid() { return Err(VhostUserError::InvalidMessage); } Ok(body) } fn recv_reply_with_files( &mut self, hdr: &VhostUserMsgHeader, ) -> VhostUserResult<(T, Option>)> { if mem::size_of::() > MAX_MSG_SIZE || hdr.is_reply() { return Err(VhostUserError::InvalidParam); } self.check_state()?; let (reply, body, files) = self.main_sock.recv_body::()?; if !reply.is_reply_for(hdr) || files.is_none() || !body.is_valid() { return Err(VhostUserError::InvalidMessage); } Ok((body, files)) } fn recv_reply_with_payload( &mut self, hdr: &VhostUserMsgHeader, ) -> VhostUserResult<(T, Vec, Option>)> { if mem::size_of::() > MAX_MSG_SIZE || hdr.get_size() as usize <= mem::size_of::() || hdr.get_size() as usize > MAX_MSG_SIZE || hdr.is_reply() { return Err(VhostUserError::InvalidParam); } self.check_state()?; let mut buf: Vec = vec![0; hdr.get_size() as usize - mem::size_of::()]; let (reply, body, bytes, files) = self.main_sock.recv_payload_into_buf::(&mut buf)?; if !reply.is_reply_for(hdr) || reply.get_size() as usize != mem::size_of::() + bytes || files.is_some() || !body.is_valid() || bytes != buf.len() { return Err(VhostUserError::InvalidMessage); } Ok((body, buf, files)) } fn wait_for_ack(&mut self, hdr: &VhostUserMsgHeader) -> VhostUserResult<()> { if self.acked_protocol_features & VhostUserProtocolFeatures::REPLY_ACK.bits() == 0 || !hdr.is_need_reply() { return Ok(()); } self.check_state()?; let (reply, body, rfds) = self.main_sock.recv_body::()?; if !reply.is_reply_for(hdr) || rfds.is_some() || 
!body.is_valid() { return Err(VhostUserError::InvalidMessage); } if body.value != 0 { return Err(VhostUserError::BackendInternalError); } Ok(()) } fn check_feature(&self, feat: VhostUserVirtioFeatures) -> VhostUserResult<()> { if self.virtio_features & feat.bits() != 0 { Ok(()) } else { Err(VhostUserError::InactiveFeature(feat)) } } fn check_proto_feature(&self, feat: VhostUserProtocolFeatures) -> VhostUserResult<()> { if self.acked_protocol_features & feat.bits() != 0 { Ok(()) } else { Err(VhostUserError::InactiveOperation(feat)) } } fn check_state(&self) -> VhostUserResult<()> { match self.error { Some(e) => Err(VhostUserError::SocketBroken( std::io::Error::from_raw_os_error(e), )), None => Ok(()), } } #[inline] fn new_request_header( &self, request: FrontendReq, size: u32, ) -> VhostUserMsgHeader { VhostUserMsgHeader::new(request, self.hdr_flags.bits() | 0x1, size) } } #[cfg(test)] mod tests { use super::super::connection::Listener; use super::*; use vmm_sys_util::rand::rand_alphanumerics; use std::path::PathBuf; fn temp_path() -> PathBuf { PathBuf::from(format!( "/tmp/vhost_test_{}", rand_alphanumerics(8).to_str().unwrap() )) } fn create_pair>(path: P) -> (Frontend, Endpoint) { let listener = Listener::new(&path, true).unwrap(); listener.set_nonblocking(true).unwrap(); let frontend = Frontend::connect(path, 2).unwrap(); let backend = listener.accept().unwrap().unwrap(); (frontend, Endpoint::from_stream(backend)) } #[test] fn create_frontend() { let path = temp_path(); let listener = Listener::new(&path, true).unwrap(); listener.set_nonblocking(true).unwrap(); let frontend = Frontend::connect(&path, 1).unwrap(); let mut backend = Endpoint::::from_stream(listener.accept().unwrap().unwrap()); assert!(frontend.as_raw_fd() > 0); // Send two messages continuously frontend.set_owner().unwrap(); frontend.reset_owner().unwrap(); let (hdr, rfds) = backend.recv_header().unwrap(); assert_eq!(hdr.get_code().unwrap(), FrontendReq::SET_OWNER); assert_eq!(hdr.get_size(), 0); 
assert_eq!(hdr.get_version(), 0x1); assert!(rfds.is_none()); let (hdr, rfds) = backend.recv_header().unwrap(); assert_eq!(hdr.get_code().unwrap(), FrontendReq::RESET_OWNER); assert_eq!(hdr.get_size(), 0); assert_eq!(hdr.get_version(), 0x1); assert!(rfds.is_none()); } #[test] fn test_create_failure() { let path = temp_path(); let _ = Listener::new(&path, true).unwrap(); let _ = Listener::new(&path, false).is_err(); assert!(Frontend::connect(&path, 1).is_err()); let listener = Listener::new(&path, true).unwrap(); assert!(Listener::new(&path, false).is_err()); listener.set_nonblocking(true).unwrap(); let _frontend = Frontend::connect(&path, 1).unwrap(); let _backend = listener.accept().unwrap().unwrap(); } #[test] fn test_features() { let path = temp_path(); let (frontend, mut peer) = create_pair(path); frontend.set_owner().unwrap(); let (hdr, rfds) = peer.recv_header().unwrap(); assert_eq!(hdr.get_code().unwrap(), FrontendReq::SET_OWNER); assert_eq!(hdr.get_size(), 0); assert_eq!(hdr.get_version(), 0x1); assert!(rfds.is_none()); let hdr = VhostUserMsgHeader::new(FrontendReq::GET_FEATURES, 0x4, 8); let msg = VhostUserU64::new(0x15); peer.send_message(&hdr, &msg, None).unwrap(); let features = frontend.get_features().unwrap(); assert_eq!(features, 0x15u64); let (_hdr, rfds) = peer.recv_header().unwrap(); assert!(rfds.is_none()); let hdr = VhostUserMsgHeader::new(FrontendReq::SET_FEATURES, 0x4, 8); let msg = VhostUserU64::new(0x15); peer.send_message(&hdr, &msg, None).unwrap(); frontend.set_features(0x15).unwrap(); let (_hdr, msg, rfds) = peer.recv_body::().unwrap(); assert!(rfds.is_none()); let val = msg.value; assert_eq!(val, 0x15); let hdr = VhostUserMsgHeader::new(FrontendReq::GET_FEATURES, 0x4, 8); let msg = 0x15u32; peer.send_message(&hdr, &msg, None).unwrap(); assert!(frontend.get_features().is_err()); } #[test] fn test_protocol_features() { let path = temp_path(); let (mut frontend, mut peer) = create_pair(path); frontend.set_owner().unwrap(); let (hdr, rfds) = 
peer.recv_header().unwrap(); assert_eq!(hdr.get_code().unwrap(), FrontendReq::SET_OWNER); assert!(rfds.is_none()); assert!(frontend.get_protocol_features().is_err()); assert!(frontend .set_protocol_features(VhostUserProtocolFeatures::all()) .is_err()); let vfeatures = 0x15 | VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits(); let hdr = VhostUserMsgHeader::new(FrontendReq::GET_FEATURES, 0x4, 8); let msg = VhostUserU64::new(vfeatures); peer.send_message(&hdr, &msg, None).unwrap(); let features = frontend.get_features().unwrap(); assert_eq!(features, vfeatures); let (_hdr, rfds) = peer.recv_header().unwrap(); assert!(rfds.is_none()); frontend.set_features(vfeatures).unwrap(); let (_hdr, msg, rfds) = peer.recv_body::().unwrap(); assert!(rfds.is_none()); let val = msg.value; assert_eq!(val, vfeatures); let pfeatures = VhostUserProtocolFeatures::all(); let hdr = VhostUserMsgHeader::new(FrontendReq::GET_PROTOCOL_FEATURES, 0x4, 8); let msg = VhostUserU64::new(pfeatures.bits()); peer.send_message(&hdr, &msg, None).unwrap(); let features = frontend.get_protocol_features().unwrap(); assert_eq!(features, pfeatures); let (_hdr, rfds) = peer.recv_header().unwrap(); assert!(rfds.is_none()); frontend.set_protocol_features(pfeatures).unwrap(); let (_hdr, msg, rfds) = peer.recv_body::().unwrap(); assert!(rfds.is_none()); let val = msg.value; assert_eq!(val, pfeatures.bits()); let hdr = VhostUserMsgHeader::new(FrontendReq::SET_PROTOCOL_FEATURES, 0x4, 8); let msg = VhostUserU64::new(pfeatures.bits()); peer.send_message(&hdr, &msg, None).unwrap(); assert!(frontend.get_protocol_features().is_err()); } #[test] fn test_frontend_set_config_negative() { let path = temp_path(); let (mut frontend, _peer) = create_pair(path); let buf = vec![0x0; MAX_MSG_SIZE + 1]; frontend .set_config(0x100, VhostUserConfigFlags::WRITABLE, &buf[0..4]) .unwrap_err(); { let mut node = frontend.node(); node.virtio_features = 0xffff_ffff; node.acked_virtio_features = 0xffff_ffff; node.protocol_features = 
0xffff_ffff; node.acked_protocol_features = 0xffff_ffff; } frontend .set_config(0, VhostUserConfigFlags::WRITABLE, &buf[0..4]) .unwrap(); frontend .set_config( VHOST_USER_CONFIG_SIZE, VhostUserConfigFlags::WRITABLE, &buf[0..4], ) .unwrap_err(); frontend .set_config(0x1000, VhostUserConfigFlags::WRITABLE, &buf[0..4]) .unwrap_err(); frontend .set_config( 0x100, // This is a negative test, so we are setting unexpected flags. VhostUserConfigFlags::from_bits_retain(0xffff_ffff), &buf[0..4], ) .unwrap_err(); frontend .set_config(VHOST_USER_CONFIG_SIZE, VhostUserConfigFlags::WRITABLE, &buf) .unwrap_err(); frontend .set_config(VHOST_USER_CONFIG_SIZE, VhostUserConfigFlags::WRITABLE, &[]) .unwrap_err(); } fn create_pair2() -> (Frontend, Endpoint) { let path = temp_path(); let (frontend, peer) = create_pair(path); { let mut node = frontend.node(); node.virtio_features = 0xffff_ffff; node.acked_virtio_features = 0xffff_ffff; node.protocol_features = 0xffff_ffff; node.acked_protocol_features = 0xffff_ffff; } (frontend, peer) } #[test] fn test_frontend_get_config_negative0() { let (mut frontend, mut peer) = create_pair2(); let buf = vec![0x0; MAX_MSG_SIZE + 1]; let mut hdr = VhostUserMsgHeader::new(FrontendReq::GET_CONFIG, 0x4, 16); let msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty()); peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None) .unwrap(); assert!(frontend .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4]) .is_ok()); hdr.set_code(FrontendReq::GET_FEATURES); peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None) .unwrap(); assert!(frontend .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4]) .is_err()); hdr.set_code(FrontendReq::GET_CONFIG); } #[test] fn test_frontend_get_config_negative1() { let (mut frontend, mut peer) = create_pair2(); let buf = vec![0x0; MAX_MSG_SIZE + 1]; let mut hdr = VhostUserMsgHeader::new(FrontendReq::GET_CONFIG, 0x4, 16); let msg = VhostUserConfig::new(0x100, 4, 
VhostUserConfigFlags::empty()); peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None) .unwrap(); assert!(frontend .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4]) .is_ok()); hdr.set_reply(false); peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None) .unwrap(); assert!(frontend .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4]) .is_err()); } #[test] fn test_frontend_get_config_negative2() { let (mut frontend, mut peer) = create_pair2(); let buf = vec![0x0; MAX_MSG_SIZE + 1]; let hdr = VhostUserMsgHeader::new(FrontendReq::GET_CONFIG, 0x4, 16); let msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty()); peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None) .unwrap(); assert!(frontend .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4]) .is_ok()); } #[test] fn test_frontend_get_config_negative3() { let (mut frontend, mut peer) = create_pair2(); let buf = vec![0x0; MAX_MSG_SIZE + 1]; let hdr = VhostUserMsgHeader::new(FrontendReq::GET_CONFIG, 0x4, 16); let mut msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty()); peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None) .unwrap(); assert!(frontend .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4]) .is_ok()); msg.offset = 0; peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None) .unwrap(); assert!(frontend .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4]) .is_err()); } #[test] fn test_frontend_get_config_negative4() { let (mut frontend, mut peer) = create_pair2(); let buf = vec![0x0; MAX_MSG_SIZE + 1]; let hdr = VhostUserMsgHeader::new(FrontendReq::GET_CONFIG, 0x4, 16); let mut msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty()); peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None) .unwrap(); assert!(frontend .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4]) .is_ok()); msg.offset = 0x101; peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None) 
.unwrap(); assert!(frontend .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4]) .is_err()); } #[test] fn test_frontend_get_config_negative5() { let (mut frontend, mut peer) = create_pair2(); let buf = vec![0x0; MAX_MSG_SIZE + 1]; let hdr = VhostUserMsgHeader::new(FrontendReq::GET_CONFIG, 0x4, 16); let mut msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty()); peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None) .unwrap(); assert!(frontend .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4]) .is_ok()); msg.offset = (MAX_MSG_SIZE + 1) as u32; peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None) .unwrap(); assert!(frontend .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4]) .is_err()); } #[test] fn test_frontend_get_config_negative6() { let (mut frontend, mut peer) = create_pair2(); let buf = vec![0x0; MAX_MSG_SIZE + 1]; let hdr = VhostUserMsgHeader::new(FrontendReq::GET_CONFIG, 0x4, 16); let mut msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty()); peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None) .unwrap(); assert!(frontend .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4]) .is_ok()); msg.size = 6; peer.send_message_with_payload(&hdr, &msg, &buf[0..6], None) .unwrap(); assert!(frontend .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4]) .is_err()); } #[test] fn test_maset_set_mem_table_failure() { let (frontend, _peer) = create_pair2(); frontend.set_mem_table(&[]).unwrap_err(); let tables = vec![VhostUserMemoryRegionInfo::default(); MAX_ATTACHED_FD_ENTRIES + 1]; frontend.set_mem_table(&tables).unwrap_err(); } } vhost-0.10.0/src/vhost_user/frontend_req_handler.rs000064400000000000000000000425311046102023000205730ustar 00000000000000// Copyright (C) 2019-2021 Alibaba Cloud. All rights reserved. 
// SPDX-License-Identifier: Apache-2.0 use std::fs::File; use std::mem; use std::os::unix::io::{AsRawFd, RawFd}; use std::os::unix::net::UnixStream; use std::sync::{Arc, Mutex}; use super::connection::Endpoint; use super::message::*; use super::{Error, HandlerResult, Result}; /// Define services provided by frontends for the backend communication channel. /// /// The vhost-user specification defines a backend communication channel, by which backends could /// request services from frontends. The [VhostUserFrontendReqHandler] trait defines services provided /// by frontends, and it's used both on the frontend side and backend side. /// - on the backend side, a stub forwarder implementing [VhostUserFrontendReqHandler] will proxy /// service requests to frontends. The [Backend] is an example stub forwarder. /// - on the frontend side, the [FrontendReqHandler] will forward service requests to a handler /// implementing [VhostUserFrontendReqHandler]. /// /// The [VhostUserFrontendReqHandler] trait is design with interior mutability to improve performance /// for multi-threading. /// /// [VhostUserFrontendReqHandler]: trait.VhostUserFrontendReqHandler.html /// [FrontendReqHandler]: struct.FrontendReqHandler.html /// [Backend]: struct.Backend.html pub trait VhostUserFrontendReqHandler { /// Handle device configuration change notifications. fn handle_config_change(&self) -> HandlerResult { Err(std::io::Error::from_raw_os_error(libc::ENOSYS)) } /// Handle virtio-fs map file requests. fn fs_backend_map(&self, _fs: &VhostUserFSBackendMsg, _fd: &dyn AsRawFd) -> HandlerResult { Err(std::io::Error::from_raw_os_error(libc::ENOSYS)) } /// Handle virtio-fs unmap file requests. fn fs_backend_unmap(&self, _fs: &VhostUserFSBackendMsg) -> HandlerResult { Err(std::io::Error::from_raw_os_error(libc::ENOSYS)) } /// Handle virtio-fs sync file requests. 
fn fs_backend_sync(&self, _fs: &VhostUserFSBackendMsg) -> HandlerResult { Err(std::io::Error::from_raw_os_error(libc::ENOSYS)) } /// Handle virtio-fs file IO requests. fn fs_backend_io(&self, _fs: &VhostUserFSBackendMsg, _fd: &dyn AsRawFd) -> HandlerResult { Err(std::io::Error::from_raw_os_error(libc::ENOSYS)) } // fn handle_iotlb_msg(&mut self, iotlb: VhostUserIotlb); // fn handle_vring_host_notifier(&mut self, area: VhostUserVringArea, fd: &dyn AsRawFd); } /// A helper trait mirroring [VhostUserFrontendReqHandler] but without interior mutability. /// /// [VhostUserFrontendReqHandler]: trait.VhostUserFrontendReqHandler.html pub trait VhostUserFrontendReqHandlerMut { /// Handle device configuration change notifications. fn handle_config_change(&mut self) -> HandlerResult { Err(std::io::Error::from_raw_os_error(libc::ENOSYS)) } /// Handle virtio-fs map file requests. fn fs_backend_map( &mut self, _fs: &VhostUserFSBackendMsg, _fd: &dyn AsRawFd, ) -> HandlerResult { Err(std::io::Error::from_raw_os_error(libc::ENOSYS)) } /// Handle virtio-fs unmap file requests. fn fs_backend_unmap(&mut self, _fs: &VhostUserFSBackendMsg) -> HandlerResult { Err(std::io::Error::from_raw_os_error(libc::ENOSYS)) } /// Handle virtio-fs sync file requests. fn fs_backend_sync(&mut self, _fs: &VhostUserFSBackendMsg) -> HandlerResult { Err(std::io::Error::from_raw_os_error(libc::ENOSYS)) } /// Handle virtio-fs file IO requests. 
fn fs_backend_io( &mut self, _fs: &VhostUserFSBackendMsg, _fd: &dyn AsRawFd, ) -> HandlerResult { Err(std::io::Error::from_raw_os_error(libc::ENOSYS)) } // fn handle_iotlb_msg(&mut self, iotlb: VhostUserIotlb); // fn handle_vring_host_notifier(&mut self, area: VhostUserVringArea, fd: RawFd); } impl VhostUserFrontendReqHandler for Mutex { fn handle_config_change(&self) -> HandlerResult { self.lock().unwrap().handle_config_change() } fn fs_backend_map(&self, fs: &VhostUserFSBackendMsg, fd: &dyn AsRawFd) -> HandlerResult { self.lock().unwrap().fs_backend_map(fs, fd) } fn fs_backend_unmap(&self, fs: &VhostUserFSBackendMsg) -> HandlerResult { self.lock().unwrap().fs_backend_unmap(fs) } fn fs_backend_sync(&self, fs: &VhostUserFSBackendMsg) -> HandlerResult { self.lock().unwrap().fs_backend_sync(fs) } fn fs_backend_io(&self, fs: &VhostUserFSBackendMsg, fd: &dyn AsRawFd) -> HandlerResult { self.lock().unwrap().fs_backend_io(fs, fd) } } /// Server to handle service requests from backends from the backend communication channel. /// /// The [FrontendReqHandler] acts as a server on the frontend side, to handle service requests from /// backends on the backend communication channel. It's actually a proxy invoking the registered /// handler implementing [VhostUserFrontendReqHandler] to do the real work. /// /// [FrontendReqHandler]: struct.FrontendReqHandler.html /// [VhostUserFrontendReqHandler]: trait.VhostUserFrontendReqHandler.html pub struct FrontendReqHandler { // underlying Unix domain socket for communication sub_sock: Endpoint, tx_sock: UnixStream, // Protocol feature VHOST_USER_PROTOCOL_F_REPLY_ACK has been negotiated. reply_ack_negotiated: bool, // the VirtIO backend device object backend: Arc, // whether the endpoint has encountered any failure error: Option, } impl FrontendReqHandler { /// Create a server to handle service requests from backends on the backend communication channel. 
/// /// This opens a pair of connected anonymous sockets to form the backend communication channel. /// The socket fd returned by [Self::get_tx_raw_fd()] should be sent to the backend by /// [VhostUserFrontend::set_backend_request_fd()]. /// /// [Self::get_tx_raw_fd()]: struct.FrontendReqHandler.html#method.get_tx_raw_fd /// [VhostUserFrontend::set_backend_request_fd()]: trait.VhostUserFrontend.html#tymethod.set_backend_request_fd pub fn new(backend: Arc) -> Result { let (tx, rx) = UnixStream::pair().map_err(Error::SocketError)?; Ok(FrontendReqHandler { sub_sock: Endpoint::::from_stream(rx), tx_sock: tx, reply_ack_negotiated: false, backend, error: None, }) } /// Get the socket fd for the backend to communication with the frontend. /// /// The returned fd should be sent to the backend by [VhostUserFrontend::set_backend_request_fd()]. /// /// [VhostUserFrontend::set_backend_request_fd()]: trait.VhostUserFrontend.html#tymethod.set_backend_request_fd pub fn get_tx_raw_fd(&self) -> RawFd { self.tx_sock.as_raw_fd() } /// Set the negotiation state of the `VHOST_USER_PROTOCOL_F_REPLY_ACK` protocol feature. /// /// When the `VHOST_USER_PROTOCOL_F_REPLY_ACK` protocol feature has been negotiated, /// the "REPLY_ACK" flag will be set in the message header for every backend to frontend request /// message. pub fn set_reply_ack_flag(&mut self, enable: bool) { self.reply_ack_negotiated = enable; } /// Mark endpoint as failed or in normal state. pub fn set_failed(&mut self, error: i32) { if error == 0 { self.error = None; } else { self.error = Some(error); } } /// Main entrance to server backend request from the backend communication channel. /// /// The caller needs to: /// - serialize calls to this function /// - decide what to do when errer happens /// - optional recover from failure pub fn handle_request(&mut self) -> Result { // Return error if the endpoint is already in failed state. 
self.check_state()?; // The underlying communication channel is a Unix domain socket in // stream mode, and recvmsg() is a little tricky here. To successfully // receive attached file descriptors, we need to receive messages and // corresponding attached file descriptors in this way: // . recv messsage header and optional attached file // . validate message header // . recv optional message body and payload according size field in // message header // . validate message body and optional payload let (hdr, files) = self.sub_sock.recv_header()?; self.check_attached_files(&hdr, &files)?; let (size, buf) = match hdr.get_size() { 0 => (0, vec![0u8; 0]), len => { if len as usize > MAX_MSG_SIZE { return Err(Error::InvalidMessage); } let (size2, rbuf) = self.sub_sock.recv_data(len as usize)?; if size2 != len as usize { return Err(Error::InvalidMessage); } (size2, rbuf) } }; let res = match hdr.get_code() { Ok(BackendReq::CONFIG_CHANGE_MSG) => { self.check_msg_size(&hdr, size, 0)?; self.backend .handle_config_change() .map_err(Error::ReqHandlerError) } Ok(BackendReq::FS_MAP) => { let msg = self.extract_msg_body::(&hdr, size, &buf)?; // check_attached_files() has validated files self.backend .fs_backend_map(&msg, &files.unwrap()[0]) .map_err(Error::ReqHandlerError) } Ok(BackendReq::FS_UNMAP) => { let msg = self.extract_msg_body::(&hdr, size, &buf)?; self.backend .fs_backend_unmap(&msg) .map_err(Error::ReqHandlerError) } Ok(BackendReq::FS_SYNC) => { let msg = self.extract_msg_body::(&hdr, size, &buf)?; self.backend .fs_backend_sync(&msg) .map_err(Error::ReqHandlerError) } Ok(BackendReq::FS_IO) => { let msg = self.extract_msg_body::(&hdr, size, &buf)?; // check_attached_files() has validated files self.backend .fs_backend_io(&msg, &files.unwrap()[0]) .map_err(Error::ReqHandlerError) } _ => Err(Error::InvalidMessage), }; self.send_ack_message(&hdr, &res)?; res } fn check_state(&self) -> Result<()> { match self.error { Some(e) => 
Err(Error::SocketBroken(std::io::Error::from_raw_os_error(e))), None => Ok(()), } } fn check_msg_size( &self, hdr: &VhostUserMsgHeader, size: usize, expected: usize, ) -> Result<()> { if hdr.get_size() as usize != expected || hdr.is_reply() || hdr.get_version() != 0x1 || size != expected { return Err(Error::InvalidMessage); } Ok(()) } fn check_attached_files( &self, hdr: &VhostUserMsgHeader, files: &Option>, ) -> Result<()> { match hdr.get_code() { Ok(BackendReq::FS_MAP | BackendReq::FS_IO) => { // Expect a single file is passed. match files { Some(files) if files.len() == 1 => Ok(()), _ => Err(Error::InvalidMessage), } } _ if files.is_some() => Err(Error::InvalidMessage), _ => Ok(()), } } fn extract_msg_body( &self, hdr: &VhostUserMsgHeader, size: usize, buf: &[u8], ) -> Result { self.check_msg_size(hdr, size, mem::size_of::())?; // SAFETY: Safe because we checked that `buf` size is equal to T size. let msg = unsafe { std::ptr::read_unaligned(buf.as_ptr() as *const T) }; if !msg.is_valid() { return Err(Error::InvalidMessage); } Ok(msg) } fn new_reply_header( &self, req: &VhostUserMsgHeader, ) -> Result> { if mem::size_of::() > MAX_MSG_SIZE { return Err(Error::InvalidParam); } self.check_state()?; Ok(VhostUserMsgHeader::new( req.get_code()?, VhostUserHeaderFlag::REPLY.bits(), mem::size_of::() as u32, )) } fn send_ack_message( &mut self, req: &VhostUserMsgHeader, res: &Result, ) -> Result<()> { if self.reply_ack_negotiated && req.is_need_reply() { let hdr = self.new_reply_header::(req)?; let def_err = libc::EINVAL; let val = match res { Ok(n) => *n, Err(e) => match e { Error::ReqHandlerError(ioerr) => match ioerr.raw_os_error() { Some(rawerr) => -rawerr as u64, None => -def_err as u64, }, _ => -def_err as u64, }, }; let msg = VhostUserU64::new(val); self.sub_sock.send_message(&hdr, &msg, None)?; } Ok(()) } } impl AsRawFd for FrontendReqHandler { fn as_raw_fd(&self) -> RawFd { self.sub_sock.as_raw_fd() } } #[cfg(test)] mod tests { use super::*; #[cfg(feature = 
"vhost-user-backend")] use crate::vhost_user::Backend; #[cfg(feature = "vhost-user-backend")] use std::os::unix::io::FromRawFd; struct MockFrontendReqHandler {} impl VhostUserFrontendReqHandlerMut for MockFrontendReqHandler { /// Handle virtio-fs map file requests from the backend. fn fs_backend_map( &mut self, _fs: &VhostUserFSBackendMsg, _fd: &dyn AsRawFd, ) -> HandlerResult { Ok(0) } /// Handle virtio-fs unmap file requests from the backend. fn fs_backend_unmap(&mut self, _fs: &VhostUserFSBackendMsg) -> HandlerResult { Err(std::io::Error::from_raw_os_error(libc::ENOSYS)) } } #[test] fn test_new_frontend_req_handler() { let backend = Arc::new(Mutex::new(MockFrontendReqHandler {})); let mut handler = FrontendReqHandler::new(backend).unwrap(); assert!(handler.get_tx_raw_fd() >= 0); assert!(handler.as_raw_fd() >= 0); handler.check_state().unwrap(); assert_eq!(handler.error, None); handler.set_failed(libc::EAGAIN); assert_eq!(handler.error, Some(libc::EAGAIN)); handler.check_state().unwrap_err(); } #[cfg(feature = "vhost-user-backend")] #[test] fn test_frontend_backend_req_handler() { let backend = Arc::new(Mutex::new(MockFrontendReqHandler {})); let mut handler = FrontendReqHandler::new(backend).unwrap(); // SAFETY: Safe because `handler` contains valid fds, and we are // checking if `dup` returns a valid fd. let fd = unsafe { libc::dup(handler.get_tx_raw_fd()) }; if fd < 0 { panic!("failed to duplicated tx fd!"); } // SAFETY: Safe because we checked if fd is valid. let stream = unsafe { UnixStream::from_raw_fd(fd) }; let backend = Backend::from_stream(stream); std::thread::spawn(move || { let res = handler.handle_request().unwrap(); assert_eq!(res, 0); handler.handle_request().unwrap_err(); }); backend .fs_backend_map(&VhostUserFSBackendMsg::default(), &fd) .unwrap(); // When REPLY_ACK has not been negotiated, the frontend has no way to detect failure from // backend side. 
backend .fs_backend_unmap(&VhostUserFSBackendMsg::default()) .unwrap(); } #[cfg(feature = "vhost-user-backend")] #[test] fn test_frontend_backend_req_handler_with_ack() { let backend = Arc::new(Mutex::new(MockFrontendReqHandler {})); let mut handler = FrontendReqHandler::new(backend).unwrap(); handler.set_reply_ack_flag(true); // SAFETY: Safe because `handler` contains valid fds, and we are // checking if `dup` returns a valid fd. let fd = unsafe { libc::dup(handler.get_tx_raw_fd()) }; if fd < 0 { panic!("failed to duplicated tx fd!"); } // SAFETY: Safe because we checked if fd is valid. let stream = unsafe { UnixStream::from_raw_fd(fd) }; let backend = Backend::from_stream(stream); std::thread::spawn(move || { let res = handler.handle_request().unwrap(); assert_eq!(res, 0); handler.handle_request().unwrap_err(); }); backend.set_reply_ack_flag(true); backend .fs_backend_map(&VhostUserFSBackendMsg::default(), &fd) .unwrap(); backend .fs_backend_unmap(&VhostUserFSBackendMsg::default()) .unwrap_err(); } } vhost-0.10.0/src/vhost_user/message.rs000064400000000000000000001234351046102023000160370ustar 00000000000000// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved. // SPDX-License-Identifier: Apache-2.0 //! Define communication messages for the vhost-user protocol. //! //! For message definition, please refer to the [vhost-user spec](https://qemu.readthedocs.io/en/latest/interop/vhost-user.html). #![allow(dead_code)] #![allow(non_camel_case_types)] #![allow(clippy::upper_case_acronyms)] use std::fmt::Debug; use std::fs::File; use std::io; use std::marker::PhantomData; use std::ops::Deref; use vm_memory::{mmap::NewBitmap, ByteValued, Error as MmapError, FileOffset, MmapRegion}; #[cfg(feature = "xen")] use vm_memory::{GuestAddress, MmapRange, MmapXenFlags}; use super::{Error, Result}; use crate::VringConfigData; /// The vhost-user specification uses a field of u32 to store message length. 
/// On the other hand, preallocated buffers are needed to receive messages from the Unix domain /// socket. To preallocating a 4GB buffer for each vhost-user message is really just an overhead. /// Among all defined vhost-user messages, only the VhostUserConfig and VhostUserMemory has variable /// message size. For the VhostUserConfig, a maximum size of 4K is enough because the user /// configuration space for virtio devices is (4K - 0x100) bytes at most. For the VhostUserMemory, /// 4K should be enough too because it can support 255 memory regions at most. pub const MAX_MSG_SIZE: usize = 0x1000; /// The VhostUserMemory message has variable message size and variable number of attached file /// descriptors. Each user memory region entry in the message payload occupies 32 bytes, /// so setting maximum number of attached file descriptors based on the maximum message size. /// But rust only implements Default and AsMut traits for arrays with 0 - 32 entries, so further /// reduce the maximum number... // pub const MAX_ATTACHED_FD_ENTRIES: usize = (MAX_MSG_SIZE - 8) / 32; pub const MAX_ATTACHED_FD_ENTRIES: usize = 32; /// Starting position (inclusion) of the device configuration space in virtio devices. pub const VHOST_USER_CONFIG_OFFSET: u32 = 0x100; /// Ending position (exclusion) of the device configuration space in virtio devices. pub const VHOST_USER_CONFIG_SIZE: u32 = 0x1000; /// Maximum number of vrings supported. pub const VHOST_USER_MAX_VRINGS: u64 = 0x8000u64; pub(super) trait Req: Clone + Copy + Debug + PartialEq + Eq + PartialOrd + Ord + Send + Sync + Into + TryFrom { } macro_rules! 
enum_value { ( $(#[$meta:meta])* $vis:vis enum $enum:ident: $T:tt { $( $(#[$variant_meta:meta])* $variant:ident $(= $val:expr)?, )* } ) => { #[repr($T)] $(#[$meta])* $vis enum $enum { $($(#[$variant_meta])* $variant $(= $val)?,)* } impl std::convert::TryFrom<$T> for $enum { type Error = (); fn try_from(v: $T) -> std::result::Result { match v { $(v if v == $enum::$variant as $T => Ok($enum::$variant),)* _ => Err(()), } } } impl std::convert::From<$enum> for $T { fn from(v: $enum) -> $T { v as $T } } } } enum_value! { #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)] /// Type of requests sending from frontends to backends. pub enum FrontendReq: u32 { /// Get from the underlying vhost implementation the features bit mask. GET_FEATURES = 1, /// Enable features in the underlying vhost implementation using a bit mask. SET_FEATURES = 2, /// Set the current Frontend as an owner of the session. SET_OWNER = 3, /// No longer used. RESET_OWNER = 4, /// Set the memory map regions on the backend so it can translate the vring addresses. SET_MEM_TABLE = 5, /// Set logging shared memory space. SET_LOG_BASE = 6, /// Set the logging file descriptor, which is passed as ancillary data. SET_LOG_FD = 7, /// Set the size of the queue. SET_VRING_NUM = 8, /// Set the addresses of the different aspects of the vring. SET_VRING_ADDR = 9, /// Set the base offset in the available vring. SET_VRING_BASE = 10, /// Get the available vring base offset. GET_VRING_BASE = 11, /// Set the event file descriptor for adding buffers to the vring. SET_VRING_KICK = 12, /// Set the event file descriptor to signal when buffers are used. SET_VRING_CALL = 13, /// Set the event file descriptor to signal when error occurs. SET_VRING_ERR = 14, /// Get the protocol feature bit mask from the underlying vhost implementation. GET_PROTOCOL_FEATURES = 15, /// Enable protocol features in the underlying vhost implementation. SET_PROTOCOL_FEATURES = 16, /// Query how many queues the backend supports. 
GET_QUEUE_NUM = 17, /// Signal backend to enable or disable corresponding vring. SET_VRING_ENABLE = 18, /// Ask vhost user backend to broadcast a fake RARP to notify the migration is terminated /// for guest that does not support GUEST_ANNOUNCE. SEND_RARP = 19, /// Set host MTU value exposed to the guest. NET_SET_MTU = 20, /// Set the socket file descriptor for backend initiated requests. SET_BACKEND_REQ_FD = 21, /// Send IOTLB messages with struct vhost_iotlb_msg as payload. IOTLB_MSG = 22, /// Set the endianness of a VQ for legacy devices. SET_VRING_ENDIAN = 23, /// Fetch the contents of the virtio device configuration space. GET_CONFIG = 24, /// Change the contents of the virtio device configuration space. SET_CONFIG = 25, /// Create a session for crypto operation. CREATE_CRYPTO_SESSION = 26, /// Close a session for crypto operation. CLOSE_CRYPTO_SESSION = 27, /// Advise backend that a migration with postcopy enabled is underway. POSTCOPY_ADVISE = 28, /// Advise backend that a transition to postcopy mode has happened. POSTCOPY_LISTEN = 29, /// Advise that postcopy migration has now completed. POSTCOPY_END = 30, /// Get a shared buffer from backend. GET_INFLIGHT_FD = 31, /// Send the shared inflight buffer back to backend. SET_INFLIGHT_FD = 32, /// Sets the GPU protocol socket file descriptor. GPU_SET_SOCKET = 33, /// Ask the vhost user backend to disable all rings and reset all internal /// device state to the initial state. RESET_DEVICE = 34, /// Indicate that a buffer was added to the vring instead of signalling it /// using the vring’s kick file descriptor. VRING_KICK = 35, /// Return a u64 payload containing the maximum number of memory slots. GET_MAX_MEM_SLOTS = 36, /// Update the memory tables by adding the region described. ADD_MEM_REG = 37, /// Update the memory tables by removing the region described. REM_MEM_REG = 38, /// Notify the backend with updated device status as defined in the VIRTIO /// specification. 
SET_STATUS = 39, /// Query the backend for its device status as defined in the VIRTIO /// specification. GET_STATUS = 40, } } impl Req for FrontendReq {} enum_value! { /// Type of requests sending from backends to frontends. #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)] pub enum BackendReq: u32 { /// Send IOTLB messages with struct vhost_iotlb_msg as payload. IOTLB_MSG = 1, /// Notify that the virtio device's configuration space has changed. CONFIG_CHANGE_MSG = 2, /// Set host notifier for a specified queue. VRING_HOST_NOTIFIER_MSG = 3, /// Indicate that a buffer was used from the vring. VRING_CALL = 4, /// Indicate that an error occurred on the specific vring. VRING_ERR = 5, /// Virtio-fs draft: map file content into the window. FS_MAP = 6, /// Virtio-fs draft: unmap file content from the window. FS_UNMAP = 7, /// Virtio-fs draft: sync file content. FS_SYNC = 8, /// Virtio-fs draft: perform a read/write from an fd directly to GPA. FS_IO = 9, } } impl Req for BackendReq {} /// Vhost message Validator. pub trait VhostUserMsgValidator: ByteValued { /// Validate message syntax only. /// It doesn't validate message semantics such as protocol version number and dependency /// on feature flags etc. fn is_valid(&self) -> bool { true } } // Bit mask for common message flags. bitflags! { /// Common message flags for vhost-user requests and replies. pub struct VhostUserHeaderFlag: u32 { /// Bits[0..2] is message version number. const VERSION = 0x3; /// Mark message as reply. const REPLY = 0x4; /// Sender anticipates a reply message from the peer. const NEED_REPLY = 0x8; /// All valid bits. const ALL_FLAGS = 0xc; /// All reserved bits. const RESERVED_BITS = !0xf; } } /// Common message header for vhost-user requests and replies. /// A vhost-user message consists of 3 header fields and an optional payload. All numbers are in the /// machine native byte order. 
#[repr(C, packed)] #[derive(Copy)] pub(super) struct VhostUserMsgHeader { request: u32, flags: u32, size: u32, _r: PhantomData, } impl Debug for VhostUserMsgHeader { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("VhostUserMsgHeader") .field("request", &{ self.request }) .field("flags", &{ self.flags }) .field("size", &{ self.size }) .finish() } } impl Clone for VhostUserMsgHeader { fn clone(&self) -> VhostUserMsgHeader { *self } } impl PartialEq for VhostUserMsgHeader { fn eq(&self, other: &Self) -> bool { self.request == other.request && self.flags == other.flags && self.size == other.size } } impl VhostUserMsgHeader { /// Create a new instance of `VhostUserMsgHeader`. pub fn new(request: R, flags: u32, size: u32) -> Self { // Default to protocol version 1 let fl = (flags & VhostUserHeaderFlag::ALL_FLAGS.bits()) | 0x1; VhostUserMsgHeader { request: request.into(), flags: fl, size, _r: PhantomData, } } /// Get message type. pub fn get_code(&self) -> Result { R::try_from(self.request).map_err(|_| Error::InvalidMessage) } /// Set message type. pub fn set_code(&mut self, request: R) { self.request = request.into(); } /// Get message version number. pub fn get_version(&self) -> u32 { self.flags & 0x3 } /// Set message version number. pub fn set_version(&mut self, ver: u32) { self.flags &= !0x3; self.flags |= ver & 0x3; } /// Check whether it's a reply message. pub fn is_reply(&self) -> bool { (self.flags & VhostUserHeaderFlag::REPLY.bits()) != 0 } /// Mark message as reply. pub fn set_reply(&mut self, is_reply: bool) { if is_reply { self.flags |= VhostUserHeaderFlag::REPLY.bits(); } else { self.flags &= !VhostUserHeaderFlag::REPLY.bits(); } } /// Check whether reply for this message is requested. pub fn is_need_reply(&self) -> bool { (self.flags & VhostUserHeaderFlag::NEED_REPLY.bits()) != 0 } /// Mark that reply for this message is needed. 
pub fn set_need_reply(&mut self, need_reply: bool) { if need_reply { self.flags |= VhostUserHeaderFlag::NEED_REPLY.bits(); } else { self.flags &= !VhostUserHeaderFlag::NEED_REPLY.bits(); } } /// Check whether it's the reply message for the request `req`. pub fn is_reply_for(&self, req: &VhostUserMsgHeader) -> bool { if let (Ok(code1), Ok(code2)) = (self.get_code(), req.get_code()) { self.is_reply() && !req.is_reply() && code1 == code2 } else { false } } /// Get message size. pub fn get_size(&self) -> u32 { self.size } /// Set message size. pub fn set_size(&mut self, size: u32) { self.size = size; } } impl Default for VhostUserMsgHeader { fn default() -> Self { VhostUserMsgHeader { request: 0, flags: 0x1, size: 0, _r: PhantomData, } } } // SAFETY: Safe because all fields of VhostUserMsgHeader are POD. unsafe impl ByteValued for VhostUserMsgHeader {} impl VhostUserMsgValidator for VhostUserMsgHeader { #[allow(clippy::if_same_then_else)] fn is_valid(&self) -> bool { if self.get_code().is_err() { return false; } else if self.size as usize > MAX_MSG_SIZE { return false; } else if self.get_version() != 0x1 { return false; } else if (self.flags & VhostUserHeaderFlag::RESERVED_BITS.bits()) != 0 { return false; } true } } // Bit mask for transport specific flags in VirtIO feature set defined by vhost-user. bitflags! { #[derive(Copy, Clone, Debug, Eq, PartialEq)] /// Transport specific flags in VirtIO feature set defined by vhost-user. pub struct VhostUserVirtioFeatures: u64 { /// Feature flag for the protocol feature. const PROTOCOL_FEATURES = 0x4000_0000; } } // Bit mask for vhost-user protocol feature flags. bitflags! { #[derive(Copy, Clone, Debug, Eq, PartialEq)] /// Vhost-user protocol feature flags. pub struct VhostUserProtocolFeatures: u64 { /// Support multiple queues. const MQ = 0x0000_0001; /// Support logging through shared memory fd. const LOG_SHMFD = 0x0000_0002; /// Support broadcasting fake RARP packet. 
const RARP = 0x0000_0004; /// Support sending reply messages for requests with NEED_REPLY flag set. const REPLY_ACK = 0x0000_0008; /// Support setting MTU for virtio-net devices. const MTU = 0x0000_0010; /// Allow the backend to send requests to the frontend by an optional communication channel. const BACKEND_REQ = 0x0000_0020; /// Support setting backend endian by SET_VRING_ENDIAN. const CROSS_ENDIAN = 0x0000_0040; /// Support crypto operations. const CRYPTO_SESSION = 0x0000_0080; /// Support sending userfault_fd from backends to frontends. const PAGEFAULT = 0x0000_0100; /// Support Virtio device configuration. const CONFIG = 0x0000_0200; /// Allow the backend to send fds (at most 8 descriptors in each message) to the frontend. const BACKEND_SEND_FD = 0x0000_0400; /// Allow the backend to register a host notifier. const HOST_NOTIFIER = 0x0000_0800; /// Support inflight shmfd. const INFLIGHT_SHMFD = 0x0000_1000; /// Support resetting the device. const RESET_DEVICE = 0x0000_2000; /// Support inband notifications. const INBAND_NOTIFICATIONS = 0x0000_4000; /// Support configuring memory slots. const CONFIGURE_MEM_SLOTS = 0x0000_8000; /// Support reporting status. const STATUS = 0x0001_0000; /// Support Xen mmap. const XEN_MMAP = 0x0002_0000; } } /// A generic message to encapsulate a 64-bit value. #[repr(transparent)] #[derive(Copy, Clone, Default)] pub struct VhostUserU64 { /// The encapsulated 64-bit common value. pub value: u64, } impl VhostUserU64 { /// Create a new instance. pub fn new(value: u64) -> Self { VhostUserU64 { value } } } // SAFETY: Safe because all fields of VhostUserU64 are POD. unsafe impl ByteValued for VhostUserU64 {} impl VhostUserMsgValidator for VhostUserU64 {} /// Memory region descriptor for the SET_MEM_TABLE request. #[repr(C, packed)] #[derive(Copy, Clone, Default)] pub struct VhostUserMemory { /// Number of memory regions in the payload. pub num_regions: u32, /// Padding for alignment. 
pub padding1: u32, } impl VhostUserMemory { /// Create a new instance. pub fn new(cnt: u32) -> Self { VhostUserMemory { num_regions: cnt, padding1: 0, } } } // SAFETY: Safe because all fields of VhostUserMemory are POD. unsafe impl ByteValued for VhostUserMemory {} impl VhostUserMsgValidator for VhostUserMemory { #[allow(clippy::if_same_then_else)] fn is_valid(&self) -> bool { if self.padding1 != 0 { return false; } else if self.num_regions == 0 || self.num_regions > MAX_ATTACHED_FD_ENTRIES as u32 { return false; } true } } /// Memory region descriptors as payload for the SET_MEM_TABLE request. #[repr(C, packed)] #[derive(Default, Clone, Copy)] pub struct VhostUserMemoryRegion { /// Guest physical address of the memory region. pub guest_phys_addr: u64, /// Size of the memory region. pub memory_size: u64, /// Virtual address in the current process. pub user_addr: u64, /// Offset where region starts in the mapped memory. pub mmap_offset: u64, #[cfg(feature = "xen")] /// Xen specific flags. pub xen_mmap_flags: u32, #[cfg(feature = "xen")] /// Xen specific data. pub xen_mmap_data: u32, } impl VhostUserMemoryRegion { fn is_valid_common(&self) -> bool { self.memory_size != 0 && self.guest_phys_addr.checked_add(self.memory_size).is_some() && self.user_addr.checked_add(self.memory_size).is_some() && self.mmap_offset.checked_add(self.memory_size).is_some() } } #[cfg(not(feature = "xen"))] impl VhostUserMemoryRegion { /// Create a new instance. pub fn new(guest_phys_addr: u64, memory_size: u64, user_addr: u64, mmap_offset: u64) -> Self { VhostUserMemoryRegion { guest_phys_addr, memory_size, user_addr, mmap_offset, } } /// Creates mmap region from Self. 
pub fn mmap_region(&self, file: File) -> Result> { MmapRegion::::from_file( FileOffset::new(file, self.mmap_offset), self.memory_size as usize, ) .map_err(MmapError::MmapRegion) .map_err(|e| Error::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e))) } fn is_valid(&self) -> bool { self.is_valid_common() } } #[cfg(feature = "xen")] impl VhostUserMemoryRegion { /// Create a new instance. pub fn with_xen( guest_phys_addr: u64, memory_size: u64, user_addr: u64, mmap_offset: u64, xen_mmap_flags: u32, xen_mmap_data: u32, ) -> Self { VhostUserMemoryRegion { guest_phys_addr, memory_size, user_addr, mmap_offset, xen_mmap_flags, xen_mmap_data, } } /// Creates mmap region from Self. pub fn mmap_region(&self, file: File) -> Result> { let range = MmapRange::new( self.memory_size as usize, Some(FileOffset::new(file, self.mmap_offset)), GuestAddress(self.guest_phys_addr), self.xen_mmap_flags, self.xen_mmap_data, ); MmapRegion::::from_range(range) .map_err(MmapError::MmapRegion) .map_err(|e| Error::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e))) } fn is_valid(&self) -> bool { if !self.is_valid_common() { false } else { // Only of one of FOREIGN or GRANT should be set. match MmapXenFlags::from_bits(self.xen_mmap_flags) { Some(flags) => flags.is_valid(), None => false, } } } } // SAFETY: Safe because all fields of VhostUserMemoryRegion are POD. unsafe impl ByteValued for VhostUserMemoryRegion {} impl VhostUserMsgValidator for VhostUserMemoryRegion { fn is_valid(&self) -> bool { self.is_valid() } } /// Payload of the VhostUserMemory message. pub type VhostUserMemoryPayload = Vec; /// Single memory region descriptor as payload for ADD_MEM_REG and REM_MEM_REG /// requests. 
#[repr(C)] #[derive(Default, Clone, Copy)] pub struct VhostUserSingleMemoryRegion { /// Padding for correct alignment padding: u64, /// General memory region region: VhostUserMemoryRegion, } impl Deref for VhostUserSingleMemoryRegion { type Target = VhostUserMemoryRegion; fn deref(&self) -> &VhostUserMemoryRegion { &self.region } } #[cfg(not(feature = "xen"))] impl VhostUserSingleMemoryRegion { /// Create a new instance. pub fn new(guest_phys_addr: u64, memory_size: u64, user_addr: u64, mmap_offset: u64) -> Self { VhostUserSingleMemoryRegion { padding: 0, region: VhostUserMemoryRegion::new( guest_phys_addr, memory_size, user_addr, mmap_offset, ), } } } #[cfg(feature = "xen")] impl VhostUserSingleMemoryRegion { /// Create a new instance. pub fn new( guest_phys_addr: u64, memory_size: u64, user_addr: u64, mmap_offset: u64, xen_mmap_flags: u32, xen_mmap_data: u32, ) -> Self { VhostUserSingleMemoryRegion { padding: 0, region: VhostUserMemoryRegion::with_xen( guest_phys_addr, memory_size, user_addr, mmap_offset, xen_mmap_flags, xen_mmap_data, ), } } } // SAFETY: Safe because all fields of VhostUserSingleMemoryRegion are POD. unsafe impl ByteValued for VhostUserSingleMemoryRegion {} impl VhostUserMsgValidator for VhostUserSingleMemoryRegion {} /// Vring state descriptor. #[repr(C, packed)] #[derive(Copy, Clone, Default)] pub struct VhostUserVringState { /// Vring index. pub index: u32, /// A common 32bit value to encapsulate vring state etc. pub num: u32, } impl VhostUserVringState { /// Create a new instance. pub fn new(index: u32, num: u32) -> Self { VhostUserVringState { index, num } } } // SAFETY: Safe because all fields of VhostUserVringState are POD. unsafe impl ByteValued for VhostUserVringState {} impl VhostUserMsgValidator for VhostUserVringState {} // Bit mask for vring address flags. bitflags! { /// Flags for vring address. pub struct VhostUserVringAddrFlags: u32 { /// Support log of vring operations. /// Modifications to "used" vring should be logged. 
const VHOST_VRING_F_LOG = 0x1; } } /// Vring address descriptor. #[repr(C, packed)] #[derive(Copy, Clone, Default)] pub struct VhostUserVringAddr { /// Vring index. pub index: u32, /// Vring flags defined by VhostUserVringAddrFlags. pub flags: u32, /// Ring address of the vring descriptor table. pub descriptor: u64, /// Ring address of the vring used ring. pub used: u64, /// Ring address of the vring available ring. pub available: u64, /// Guest address for logging. pub log: u64, } impl VhostUserVringAddr { /// Create a new instance. pub fn new( index: u32, flags: VhostUserVringAddrFlags, descriptor: u64, used: u64, available: u64, log: u64, ) -> Self { VhostUserVringAddr { index, flags: flags.bits(), descriptor, used, available, log, } } /// Create a new instance from `VringConfigData`. #[cfg_attr(feature = "cargo-clippy", allow(clippy::useless_conversion))] pub fn from_config_data(index: u32, config_data: &VringConfigData) -> Self { let log_addr = config_data.log_addr.unwrap_or(0); VhostUserVringAddr { index, flags: config_data.flags, descriptor: config_data.desc_table_addr, used: config_data.used_ring_addr, available: config_data.avail_ring_addr, log: log_addr, } } } // SAFETY: Safe because all fields of VhostUserVringAddr are POD. unsafe impl ByteValued for VhostUserVringAddr {} impl VhostUserMsgValidator for VhostUserVringAddr { #[allow(clippy::if_same_then_else)] fn is_valid(&self) -> bool { if (self.flags & !VhostUserVringAddrFlags::all().bits()) != 0 { return false; } else if self.descriptor & 0xf != 0 { return false; } else if self.available & 0x1 != 0 { return false; } else if self.used & 0x3 != 0 { return false; } true } } // Bit mask for the vhost-user device configuration message. bitflags! { #[derive(Copy, Clone, Debug, Eq, PartialEq)] /// Flags for the device configuration message. pub struct VhostUserConfigFlags: u32 { /// Vhost frontend messages used for writeable fields. const WRITABLE = 0x1; /// Vhost frontend messages used for live migration. 
const LIVE_MIGRATION = 0x2; } } /// Message to read/write device configuration space. #[repr(C, packed)] #[derive(Copy, Clone, Default)] pub struct VhostUserConfig { /// Offset of virtio device's configuration space. pub offset: u32, /// Configuration space access size in bytes. pub size: u32, /// Flags for the device configuration operation. pub flags: u32, } impl VhostUserConfig { /// Create a new instance. pub fn new(offset: u32, size: u32, flags: VhostUserConfigFlags) -> Self { VhostUserConfig { offset, size, flags: flags.bits(), } } } // SAFETY: Safe because all fields of VhostUserConfig are POD. unsafe impl ByteValued for VhostUserConfig {} impl VhostUserMsgValidator for VhostUserConfig { #[allow(clippy::if_same_then_else)] fn is_valid(&self) -> bool { let end_addr = match self.size.checked_add(self.offset) { Some(addr) => addr, None => return false, }; if (self.flags & !VhostUserConfigFlags::all().bits()) != 0 { return false; } else if self.size == 0 || end_addr > VHOST_USER_CONFIG_SIZE { return false; } true } } /// Payload for the VhostUserConfig message. pub type VhostUserConfigPayload = Vec; /// Single memory region descriptor as payload for ADD_MEM_REG and REM_MEM_REG /// requests. #[repr(C)] #[derive(Copy, Clone, Default)] pub struct VhostUserInflight { /// Size of the area to track inflight I/O. pub mmap_size: u64, /// Offset of this area from the start of the supplied file descriptor. pub mmap_offset: u64, /// Number of virtqueues. pub num_queues: u16, /// Size of virtqueues. pub queue_size: u16, } impl VhostUserInflight { /// Create a new instance. pub fn new(mmap_size: u64, mmap_offset: u64, num_queues: u16, queue_size: u16) -> Self { VhostUserInflight { mmap_size, mmap_offset, num_queues, queue_size, } } } // SAFETY: Safe because all fields of VhostUserInflight are POD. 
unsafe impl ByteValued for VhostUserInflight {} impl VhostUserMsgValidator for VhostUserInflight { fn is_valid(&self) -> bool { if self.num_queues == 0 || self.queue_size == 0 { return false; } true } } /// Single memory region descriptor as payload for SET_LOG_BASE request. #[repr(C)] #[derive(Copy, Clone, Default)] pub struct VhostUserLog { /// Size of the area to log dirty pages. pub mmap_size: u64, /// Offset of this area from the start of the supplied file descriptor. pub mmap_offset: u64, } impl VhostUserLog { /// Create a new instance. pub fn new(mmap_size: u64, mmap_offset: u64) -> Self { VhostUserLog { mmap_size, mmap_offset, } } } // SAFETY: Safe because all fields of VhostUserLog are POD. unsafe impl ByteValued for VhostUserLog {} impl VhostUserMsgValidator for VhostUserLog { fn is_valid(&self) -> bool { if self.mmap_size == 0 || self.mmap_offset.checked_add(self.mmap_size).is_none() { return false; } true } } // Bit mask for flags in virtio-fs backend messages bitflags! { #[derive(Copy, Clone, Debug, Eq, PartialEq, Default)] /// Flags for virtio-fs backend messages. pub struct VhostUserFSBackendMsgFlags: u64 { /// Empty permission. const EMPTY = 0x0; /// Read permission. const MAP_R = 0x1; /// Write permission. const MAP_W = 0x2; } } /// Max entries in one virtio-fs backend request. pub const VHOST_USER_FS_BACKEND_ENTRIES: usize = 8; /// Backend request message to update the MMIO window. #[repr(C, packed)] #[derive(Copy, Clone, Default)] pub struct VhostUserFSBackendMsg { /// File offset. pub fd_offset: [u64; VHOST_USER_FS_BACKEND_ENTRIES], /// Offset into the DAX window. pub cache_offset: [u64; VHOST_USER_FS_BACKEND_ENTRIES], /// Size of region to map. pub len: [u64; VHOST_USER_FS_BACKEND_ENTRIES], /// Flags for the mmap operation pub flags: [VhostUserFSBackendMsgFlags; VHOST_USER_FS_BACKEND_ENTRIES], } // SAFETY: Safe because all fields of VhostUserFSBackendMsg are POD. 
unsafe impl ByteValued for VhostUserFSBackendMsg {} impl VhostUserMsgValidator for VhostUserFSBackendMsg { fn is_valid(&self) -> bool { for i in 0..VHOST_USER_FS_BACKEND_ENTRIES { if ({ self.flags[i] }.bits() & !VhostUserFSBackendMsgFlags::all().bits()) != 0 || self.fd_offset[i].checked_add(self.len[i]).is_none() || self.cache_offset[i].checked_add(self.len[i]).is_none() { return false; } } true } } /// Inflight I/O descriptor state for split virtqueues #[repr(C, packed)] #[derive(Clone, Copy, Default)] pub struct DescStateSplit { /// Indicate whether this descriptor (only head) is inflight or not. pub inflight: u8, /// Padding padding: [u8; 5], /// List of last batch of used descriptors, only when batching is used for submitting pub next: u16, /// Preserve order of fetching available descriptors, only for head descriptor pub counter: u64, } impl DescStateSplit { /// New instance of DescStateSplit struct pub fn new() -> Self { Self::default() } } /// Inflight I/O queue region for split virtqueues #[repr(C, packed)] pub struct QueueRegionSplit { /// Features flags of this region pub features: u64, /// Version of this region pub version: u16, /// Number of DescStateSplit entries pub desc_num: u16, /// List to track last batch of used descriptors pub last_batch_head: u16, /// Idx value of used ring pub used_idx: u16, /// Pointer to an array of DescStateSplit entries pub desc: u64, } impl QueueRegionSplit { /// New instance of QueueRegionSplit struct pub fn new(features: u64, queue_size: u16) -> Self { QueueRegionSplit { features, version: 1, desc_num: queue_size, last_batch_head: 0, used_idx: 0, desc: 0, } } } /// Inflight I/O descriptor state for packed virtqueues #[repr(C, packed)] #[derive(Clone, Copy, Default)] pub struct DescStatePacked { /// Indicate whether this descriptor (only head) is inflight or not. 
pub inflight: u8, /// Padding padding: u8, /// Link to next free entry pub next: u16, /// Link to last entry of descriptor list, only for head pub last: u16, /// Length of descriptor list, only for head pub num: u16, /// Preserve order of fetching avail descriptors, only for head pub counter: u64, /// Buffer ID pub id: u16, /// Descriptor flags pub flags: u16, /// Buffer length pub len: u32, /// Buffer address pub addr: u64, } impl DescStatePacked { /// New instance of DescStatePacked struct pub fn new() -> Self { Self::default() } } /// Inflight I/O queue region for packed virtqueues #[repr(C, packed)] pub struct QueueRegionPacked { /// Features flags of this region pub features: u64, /// version of this region pub version: u16, /// size of descriptor state array pub desc_num: u16, /// head of free DescStatePacked entry list pub free_head: u16, /// old head of free DescStatePacked entry list pub old_free_head: u16, /// used idx of descriptor ring pub used_idx: u16, /// old used idx of descriptor ring pub old_used_idx: u16, /// device ring wrap counter pub used_wrap_counter: u8, /// old device ring wrap counter pub old_used_wrap_counter: u8, /// Padding padding: [u8; 7], /// Pointer to array tracking state of each descriptor from descriptor ring pub desc: u64, } impl QueueRegionPacked { /// New instance of QueueRegionPacked struct pub fn new(features: u64, queue_size: u16) -> Self { QueueRegionPacked { features, version: 1, desc_num: queue_size, free_head: 0, old_free_head: 0, used_idx: 0, old_used_idx: 0, used_wrap_counter: 0, old_used_wrap_counter: 0, padding: [0; 7], desc: 0, } } } #[cfg(test)] mod tests { use super::*; use std::mem; #[cfg(feature = "xen")] impl VhostUserMemoryRegion { fn new(guest_phys_addr: u64, memory_size: u64, user_addr: u64, mmap_offset: u64) -> Self { Self::with_xen( guest_phys_addr, memory_size, user_addr, mmap_offset, MmapXenFlags::FOREIGN.bits(), 0, ) } } #[test] fn check_frontend_request_code() { let code: u32 = 
FrontendReq::GET_FEATURES.into(); assert!(FrontendReq::try_from(code).is_ok()); assert_eq!(code, code.clone()); assert!(FrontendReq::try_from(10000).is_err()); } #[test] fn check_backend_request_code() { let code: u32 = BackendReq::CONFIG_CHANGE_MSG.into(); assert!(BackendReq::try_from(code).is_ok()); assert_eq!(code, code.clone()); assert!(BackendReq::try_from(10000).is_err()); } #[test] fn msg_header_ops() { let mut hdr = VhostUserMsgHeader::new(FrontendReq::GET_FEATURES, 0, 0x100); assert_eq!(hdr.get_code().unwrap(), FrontendReq::GET_FEATURES); hdr.set_code(FrontendReq::SET_FEATURES); assert_eq!(hdr.get_code().unwrap(), FrontendReq::SET_FEATURES); assert_eq!(hdr.get_version(), 0x1); assert!(!hdr.is_reply()); hdr.set_reply(true); assert!(hdr.is_reply()); hdr.set_reply(false); assert!(!hdr.is_need_reply()); hdr.set_need_reply(true); assert!(hdr.is_need_reply()); hdr.set_need_reply(false); assert_eq!(hdr.get_size(), 0x100); hdr.set_size(0x200); assert_eq!(hdr.get_size(), 0x200); assert!(!hdr.is_need_reply()); assert!(!hdr.is_reply()); assert_eq!(hdr.get_version(), 0x1); // Check message length assert!(hdr.is_valid()); hdr.set_size(0x2000); assert!(!hdr.is_valid()); hdr.set_size(0x100); assert_eq!(hdr.get_size(), 0x100); assert!(hdr.is_valid()); hdr.set_size((MAX_MSG_SIZE - mem::size_of::>()) as u32); assert!(hdr.is_valid()); hdr.set_size(0x0); assert!(hdr.is_valid()); // Check version hdr.set_version(0x0); assert!(!hdr.is_valid()); hdr.set_version(0x2); assert!(!hdr.is_valid()); hdr.set_version(0x1); assert!(hdr.is_valid()); // Test Debug, Clone, PartiaEq trait assert_eq!(hdr, hdr.clone()); assert_eq!(hdr.clone().get_code().unwrap(), hdr.get_code().unwrap()); assert_eq!(format!("{:?}", hdr.clone()), format!("{:?}", hdr)); } #[test] fn test_vhost_user_message_u64() { let val = VhostUserU64::default(); let val1 = VhostUserU64::new(0); let a = val.value; let b = val1.value; assert_eq!(a, b); let a = VhostUserU64::new(1).value; assert_eq!(a, 1); } #[test] fn 
check_user_memory() { let mut msg = VhostUserMemory::new(1); assert!(msg.is_valid()); msg.num_regions = MAX_ATTACHED_FD_ENTRIES as u32; assert!(msg.is_valid()); msg.num_regions += 1; assert!(!msg.is_valid()); msg.num_regions = 0xFFFFFFFF; assert!(!msg.is_valid()); msg.num_regions = MAX_ATTACHED_FD_ENTRIES as u32; msg.padding1 = 1; assert!(!msg.is_valid()); } #[test] fn check_user_memory_region() { let mut msg = VhostUserMemoryRegion::new(0, 0x1000, 0, 0); assert!(msg.is_valid()); msg.guest_phys_addr = 0xFFFFFFFFFFFFEFFF; assert!(msg.is_valid()); msg.guest_phys_addr = 0xFFFFFFFFFFFFF000; assert!(!msg.is_valid()); msg.guest_phys_addr = 0xFFFFFFFFFFFF0000; msg.memory_size = 0; assert!(!msg.is_valid()); let a = msg.guest_phys_addr; let b = msg.guest_phys_addr; assert_eq!(a, b); let msg = VhostUserMemoryRegion::default(); let a = msg.guest_phys_addr; assert_eq!(a, 0); let a = msg.memory_size; assert_eq!(a, 0); let a = msg.user_addr; assert_eq!(a, 0); let a = msg.mmap_offset; assert_eq!(a, 0); } #[test] fn test_vhost_user_state() { let state = VhostUserVringState::new(5, 8); let a = state.index; assert_eq!(a, 5); let a = state.num; assert_eq!(a, 8); assert!(state.is_valid()); let state = VhostUserVringState::default(); let a = state.index; assert_eq!(a, 0); let a = state.num; assert_eq!(a, 0); assert!(state.is_valid()); } #[test] fn test_vhost_user_addr() { let mut addr = VhostUserVringAddr::new( 2, VhostUserVringAddrFlags::VHOST_VRING_F_LOG, 0x1000, 0x2000, 0x3000, 0x4000, ); let a = addr.index; assert_eq!(a, 2); let a = addr.flags; assert_eq!(a, VhostUserVringAddrFlags::VHOST_VRING_F_LOG.bits()); let a = addr.descriptor; assert_eq!(a, 0x1000); let a = addr.used; assert_eq!(a, 0x2000); let a = addr.available; assert_eq!(a, 0x3000); let a = addr.log; assert_eq!(a, 0x4000); assert!(addr.is_valid()); addr.descriptor = 0x1001; assert!(!addr.is_valid()); addr.descriptor = 0x1000; addr.available = 0x3001; assert!(!addr.is_valid()); addr.available = 0x3000; addr.used = 0x2001; 
assert!(!addr.is_valid()); addr.used = 0x2000; assert!(addr.is_valid()); } #[test] fn test_vhost_user_state_from_config() { let config = VringConfigData { queue_max_size: 256, queue_size: 128, flags: VhostUserVringAddrFlags::VHOST_VRING_F_LOG.bits(), desc_table_addr: 0x1000, used_ring_addr: 0x2000, avail_ring_addr: 0x3000, log_addr: Some(0x4000), }; let addr = VhostUserVringAddr::from_config_data(2, &config); let a = addr.index; assert_eq!(a, 2); let a = addr.flags; assert_eq!(a, VhostUserVringAddrFlags::VHOST_VRING_F_LOG.bits()); let a = addr.descriptor; assert_eq!(a, 0x1000); let a = addr.used; assert_eq!(a, 0x2000); let a = addr.available; assert_eq!(a, 0x3000); let a = addr.log; assert_eq!(a, 0x4000); assert!(addr.is_valid()); } #[test] fn check_user_vring_addr() { let mut msg = VhostUserVringAddr::new(0, VhostUserVringAddrFlags::all(), 0x0, 0x0, 0x0, 0x0); assert!(msg.is_valid()); msg.descriptor = 1; assert!(!msg.is_valid()); msg.descriptor = 0; msg.available = 1; assert!(!msg.is_valid()); msg.available = 0; msg.used = 1; assert!(!msg.is_valid()); msg.used = 0; msg.flags |= 0x80000000; assert!(!msg.is_valid()); msg.flags &= !0x80000000; } #[test] fn check_user_config_msg() { let mut msg = VhostUserConfig::new(0, VHOST_USER_CONFIG_SIZE, VhostUserConfigFlags::WRITABLE); assert!(msg.is_valid()); msg.size = 0; assert!(!msg.is_valid()); msg.size = 1; assert!(msg.is_valid()); msg.offset = u32::MAX; assert!(!msg.is_valid()); msg.offset = VHOST_USER_CONFIG_SIZE; assert!(!msg.is_valid()); msg.offset = VHOST_USER_CONFIG_SIZE - 1; assert!(msg.is_valid()); msg.size = 2; assert!(!msg.is_valid()); msg.size = 1; msg.flags |= VhostUserConfigFlags::LIVE_MIGRATION.bits(); assert!(msg.is_valid()); msg.flags |= 0x4; assert!(!msg.is_valid()); } #[test] fn test_vhost_user_fs_backend() { let mut fs_backend = VhostUserFSBackendMsg::default(); assert!(fs_backend.is_valid()); fs_backend.fd_offset[0] = 0xffff_ffff_ffff_ffff; fs_backend.len[0] = 0x1; assert!(!fs_backend.is_valid()); 
assert_ne!( VhostUserFSBackendMsgFlags::MAP_R, VhostUserFSBackendMsgFlags::MAP_W ); assert_eq!(VhostUserFSBackendMsgFlags::EMPTY.bits(), 0); } } vhost-0.10.0/src/vhost_user/mod.rs000064400000000000000000000513421046102023000151670ustar 00000000000000// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved. // SPDX-License-Identifier: Apache-2.0 //! The protocol for vhost-user is based on the existing implementation of vhost for the Linux //! Kernel. The protocol defines two sides of the communication, frontend and backend. Frontend is //! the application that shares its virtqueues. Backend is the consumer of the virtqueues. //! //! The communication channel between the frontend and the backend includes two sub channels. One is //! used to send requests from the frontend to the backend and optional replies from the backend to the //! frontend. This sub channel is created on frontend startup by connecting to the backend service //! endpoint. The other is used to send requests from the backend to the frontend and optional replies //! from the frontend to the backend. This sub channel is created by the frontend issuing a //! VHOST_USER_SET_BACKEND_REQ_FD request to the backend with an auxiliary file descriptor. //! //! Unix domain socket is used as the underlying communication channel because the frontend needs to //! send file descriptors to the backend. //! //! Most messages that can be sent via the Unix domain socket implementing vhost-user have an //! equivalent ioctl to the kernel implementation. 
use std::fs::File; use std::io::Error as IOError; pub mod message; pub use self::message::{VhostUserProtocolFeatures, VhostUserVirtioFeatures}; mod connection; pub use self::connection::Listener; #[cfg(feature = "vhost-user-frontend")] mod frontend; #[cfg(feature = "vhost-user-frontend")] pub use self::frontend::{Frontend, VhostUserFrontend}; #[cfg(feature = "vhost-user")] mod frontend_req_handler; #[cfg(feature = "vhost-user")] pub use self::frontend_req_handler::{ FrontendReqHandler, VhostUserFrontendReqHandler, VhostUserFrontendReqHandlerMut, }; #[cfg(feature = "vhost-user-backend")] mod backend; #[cfg(feature = "vhost-user-backend")] pub use self::backend::BackendListener; #[cfg(feature = "vhost-user-backend")] mod backend_req_handler; #[cfg(feature = "vhost-user-backend")] pub use self::backend_req_handler::{ BackendReqHandler, VhostUserBackendReqHandler, VhostUserBackendReqHandlerMut, }; #[cfg(feature = "vhost-user-backend")] mod backend_req; #[cfg(feature = "vhost-user-backend")] pub use self::backend_req::Backend; /// Errors for vhost-user operations #[derive(Debug)] pub enum Error { /// Invalid parameters. InvalidParam, /// Invalid operation due to some reason InvalidOperation(&'static str), /// Unsupported operation due to missing feature InactiveFeature(VhostUserVirtioFeatures), /// Unsupported operations due to that the protocol feature hasn't been negotiated. InactiveOperation(VhostUserProtocolFeatures), /// Invalid message format, flag or content. InvalidMessage, /// Only part of a message have been sent or received successfully PartialMessage, /// The peer disconnected from the socket. Disconnected, /// Message is too large OversizedMsg, /// Fd array in question is too big or too small IncorrectFds, /// Can't connect to peer. SocketConnect(std::io::Error), /// Generic socket errors. SocketError(std::io::Error), /// The socket is broken or has been closed. SocketBroken(std::io::Error), /// Should retry the socket operation again. 
SocketRetry(std::io::Error), /// Failure from the backend side. BackendInternalError, /// Failure from the frontend side. FrontendInternalError, /// Virtio/protocol features mismatch. FeatureMismatch, /// Error from request handler ReqHandlerError(IOError), /// memfd file creation error MemFdCreateError, /// File truncate error FileTrucateError, /// memfd file seal errors MemFdSealError, } impl std::fmt::Display for Error { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self { Error::InvalidParam => write!(f, "invalid parameters"), Error::InvalidOperation(reason) => write!(f, "invalid operation: {}", reason), Error::InactiveFeature(bits) => write!(f, "inactive feature: {}", bits.bits()), Error::InactiveOperation(bits) => { write!(f, "inactive protocol operation: {}", bits.bits()) } Error::InvalidMessage => write!(f, "invalid message"), Error::PartialMessage => write!(f, "partial message"), Error::Disconnected => write!(f, "peer disconnected"), Error::OversizedMsg => write!(f, "oversized message"), Error::IncorrectFds => write!(f, "wrong number of attached fds"), Error::SocketError(e) => write!(f, "socket error: {}", e), Error::SocketConnect(e) => write!(f, "can't connect to peer: {}", e), Error::SocketBroken(e) => write!(f, "socket is broken: {}", e), Error::SocketRetry(e) => write!(f, "temporary socket error: {}", e), Error::BackendInternalError => write!(f, "backend internal error"), Error::FrontendInternalError => write!(f, "Frontend internal error"), Error::FeatureMismatch => write!(f, "virtio/protocol features mismatch"), Error::ReqHandlerError(e) => write!(f, "handler failed to handle request: {}", e), Error::MemFdCreateError => { write!(f, "handler failed to allocate memfd during get_inflight_fd") } Error::FileTrucateError => { write!(f, "handler failed to trucate memfd during get_inflight_fd") } Error::MemFdSealError => write!( f, "handler failed to apply seals to memfd during get_inflight_fd" ), } } } impl std::error::Error for 
Error {} impl Error { /// Determine whether to rebuild the underline communication channel. pub fn should_reconnect(&self) -> bool { match *self { // Should reconnect because it may be caused by temporary network errors. Error::PartialMessage => true, // Should reconnect because the underline socket is broken. Error::SocketBroken(_) => true, // Backend internal error, hope it recovers on reconnect. Error::BackendInternalError => true, // Frontend internal error, hope it recovers on reconnect. Error::FrontendInternalError => true, // Should just retry the IO operation instead of rebuilding the underline connection. Error::SocketRetry(_) => false, // Looks like the peer deliberately disconnected the socket. Error::Disconnected => false, Error::InvalidParam | Error::InvalidOperation(_) => false, Error::InactiveFeature(_) | Error::InactiveOperation(_) => false, Error::InvalidMessage | Error::IncorrectFds | Error::OversizedMsg => false, Error::SocketError(_) | Error::SocketConnect(_) => false, Error::FeatureMismatch => false, Error::ReqHandlerError(_) => false, Error::MemFdCreateError | Error::FileTrucateError | Error::MemFdSealError => false, } } } impl std::convert::From for Error { /// Convert raw socket errors into meaningful vhost-user errors. /// /// The vmm_sys_util::errno::Error is a simple wrapper over the raw errno, which doesn't means /// much to the vhost-user connection manager. So convert it into meaningful errors to simplify /// the connection manager logic. /// /// # Return: /// * - Error::SocketRetry: temporary error caused by signals or short of resources. /// * - Error::SocketBroken: the underline socket is broken. /// * - Error::SocketError: other socket related errors. #[allow(unreachable_patterns)] // EWOULDBLOCK equals to EGAIN on linux fn from(err: vmm_sys_util::errno::Error) -> Self { match err.errno() { // The socket is marked nonblocking and the requested operation would block. 
libc::EAGAIN => Error::SocketRetry(IOError::from_raw_os_error(libc::EAGAIN)), // The socket is marked nonblocking and the requested operation would block. libc::EWOULDBLOCK => Error::SocketRetry(IOError::from_raw_os_error(libc::EWOULDBLOCK)), // A signal occurred before any data was transmitted libc::EINTR => Error::SocketRetry(IOError::from_raw_os_error(libc::EINTR)), // The output queue for a network interface was full. This generally indicates // that the interface has stopped sending, but may be caused by transient congestion. libc::ENOBUFS => Error::SocketRetry(IOError::from_raw_os_error(libc::ENOBUFS)), // No memory available. libc::ENOMEM => Error::SocketRetry(IOError::from_raw_os_error(libc::ENOMEM)), // Connection reset by peer. libc::ECONNRESET => Error::SocketBroken(IOError::from_raw_os_error(libc::ECONNRESET)), // The local end has been shut down on a connection oriented socket. In this case the // process will also receive a SIGPIPE unless MSG_NOSIGNAL is set. libc::EPIPE => Error::SocketBroken(IOError::from_raw_os_error(libc::EPIPE)), // Write permission is denied on the destination socket file, or search permission is // denied for one of the directories the path prefix. libc::EACCES => Error::SocketConnect(IOError::from_raw_os_error(libc::EACCES)), // Catch all other errors e => Error::SocketError(IOError::from_raw_os_error(e)), } } } /// Result of vhost-user operations pub type Result = std::result::Result; /// Result of request handler. pub type HandlerResult = std::result::Result; /// Utility function to take the first element from option of a vector of files. /// Returns `None` if the vector contains no file or more than one file. 
pub(crate) fn take_single_file(files: Option>) -> Option { let mut files = files?; if files.len() != 1 { return None; } Some(files.swap_remove(0)) } #[cfg(all(test, feature = "vhost-user-backend"))] mod dummy_backend; #[cfg(all(test, feature = "vhost-user-frontend", feature = "vhost-user-backend"))] mod tests { use std::fs::File; use std::os::unix::io::AsRawFd; use std::path::{Path, PathBuf}; use std::sync::{Arc, Barrier, Mutex}; use std::thread; use vmm_sys_util::rand::rand_alphanumerics; use vmm_sys_util::tempfile::TempFile; use super::dummy_backend::{DummyBackendReqHandler, VIRTIO_FEATURES}; use super::message::*; use super::*; use crate::backend::VhostBackend; use crate::{VhostUserDirtyLogRegion, VhostUserMemoryRegionInfo, VringConfigData}; fn temp_path() -> PathBuf { PathBuf::from(format!( "/tmp/vhost_test_{}", rand_alphanumerics(8).to_str().unwrap() )) } fn create_backend(path: P, backend: Arc) -> (Frontend, BackendReqHandler) where P: AsRef, S: VhostUserBackendReqHandler, { let listener = Listener::new(&path, true).unwrap(); let mut backend_listener = BackendListener::new(listener, backend).unwrap(); let frontend = Frontend::connect(&path, 1).unwrap(); (frontend, backend_listener.accept().unwrap().unwrap()) } #[test] fn create_dummy_backend() { let backend = Arc::new(Mutex::new(DummyBackendReqHandler::new())); backend.set_owner().unwrap(); assert!(backend.set_owner().is_err()); } #[test] fn test_set_owner() { let backend_be = Arc::new(Mutex::new(DummyBackendReqHandler::new())); let path = temp_path(); let (frontend, mut backend) = create_backend(path, backend_be.clone()); assert!(!backend_be.lock().unwrap().owned); frontend.set_owner().unwrap(); backend.handle_request().unwrap(); assert!(backend_be.lock().unwrap().owned); frontend.set_owner().unwrap(); assert!(backend.handle_request().is_err()); assert!(backend_be.lock().unwrap().owned); } #[test] fn test_set_features() { let mbar = Arc::new(Barrier::new(2)); let sbar = mbar.clone(); let path = temp_path(); 
let backend_be = Arc::new(Mutex::new(DummyBackendReqHandler::new())); let (mut frontend, mut backend) = create_backend(path, backend_be.clone()); thread::spawn(move || { backend.handle_request().unwrap(); assert!(backend_be.lock().unwrap().owned); backend.handle_request().unwrap(); backend.handle_request().unwrap(); assert_eq!( backend_be.lock().unwrap().acked_features, VIRTIO_FEATURES & !0x1 ); backend.handle_request().unwrap(); backend.handle_request().unwrap(); assert_eq!( backend_be.lock().unwrap().acked_protocol_features, VhostUserProtocolFeatures::all().bits() ); sbar.wait(); }); frontend.set_owner().unwrap(); // set virtio features let features = frontend.get_features().unwrap(); assert_eq!(features, VIRTIO_FEATURES); frontend.set_features(VIRTIO_FEATURES & !0x1).unwrap(); // set vhost protocol features let features = frontend.get_protocol_features().unwrap(); assert_eq!(features.bits(), VhostUserProtocolFeatures::all().bits()); frontend.set_protocol_features(features).unwrap(); mbar.wait(); } #[test] fn test_frontend_backend_process() { let mbar = Arc::new(Barrier::new(2)); let sbar = mbar.clone(); let path = temp_path(); let backend_be = Arc::new(Mutex::new(DummyBackendReqHandler::new())); let (mut frontend, mut backend) = create_backend(path, backend_be.clone()); thread::spawn(move || { // set_own() backend.handle_request().unwrap(); assert!(backend_be.lock().unwrap().owned); // get/set_features() backend.handle_request().unwrap(); backend.handle_request().unwrap(); assert_eq!( backend_be.lock().unwrap().acked_features, VIRTIO_FEATURES & !0x1 ); backend.handle_request().unwrap(); backend.handle_request().unwrap(); let mut features = VhostUserProtocolFeatures::all(); // Disable Xen mmap feature. 
if !cfg!(feature = "xen") { features.remove(VhostUserProtocolFeatures::XEN_MMAP); } assert_eq!( backend_be.lock().unwrap().acked_protocol_features, features.bits() ); // get_inflight_fd() backend.handle_request().unwrap(); // set_inflight_fd() backend.handle_request().unwrap(); // get_queue_num() backend.handle_request().unwrap(); // set_mem_table() backend.handle_request().unwrap(); // get/set_config() backend.handle_request().unwrap(); backend.handle_request().unwrap(); // set_backend_request_fd backend.handle_request().unwrap(); // set_vring_enable backend.handle_request().unwrap(); // set_log_base,set_log_fd() backend.handle_request().unwrap_err(); backend.handle_request().unwrap_err(); // set_vring_xxx backend.handle_request().unwrap(); backend.handle_request().unwrap(); backend.handle_request().unwrap(); backend.handle_request().unwrap(); backend.handle_request().unwrap(); backend.handle_request().unwrap(); // get_max_mem_slots() backend.handle_request().unwrap(); // add_mem_region() backend.handle_request().unwrap(); // remove_mem_region() backend.handle_request().unwrap(); sbar.wait(); }); frontend.set_owner().unwrap(); // set virtio features let features = frontend.get_features().unwrap(); assert_eq!(features, VIRTIO_FEATURES); frontend.set_features(VIRTIO_FEATURES & !0x1).unwrap(); // set vhost protocol features let mut features = frontend.get_protocol_features().unwrap(); assert_eq!(features.bits(), VhostUserProtocolFeatures::all().bits()); // Disable Xen mmap feature. 
if !cfg!(feature = "xen") { features.remove(VhostUserProtocolFeatures::XEN_MMAP); } frontend.set_protocol_features(features).unwrap(); // Retrieve inflight I/O tracking information let (inflight_info, inflight_file) = frontend .get_inflight_fd(&VhostUserInflight { num_queues: 2, queue_size: 256, ..Default::default() }) .unwrap(); // Set the buffer back to the backend frontend .set_inflight_fd(&inflight_info, inflight_file.as_raw_fd()) .unwrap(); let num = frontend.get_queue_num().unwrap(); assert_eq!(num, 2); let eventfd = vmm_sys_util::eventfd::EventFd::new(0).unwrap(); let mem = [VhostUserMemoryRegionInfo::new( 0, 0x10_0000, 0, 0, eventfd.as_raw_fd(), )]; frontend.set_mem_table(&mem).unwrap(); frontend .set_config(0x100, VhostUserConfigFlags::WRITABLE, &[0xa5u8; 4]) .unwrap(); let buf = [0x0u8; 4]; let (reply_body, reply_payload) = frontend .get_config(0x100, 4, VhostUserConfigFlags::empty(), &buf) .unwrap(); let offset = reply_body.offset; assert_eq!(offset, 0x100); assert_eq!(&reply_payload, &[0xa5; 4]); frontend.set_backend_request_fd(&eventfd).unwrap(); frontend.set_vring_enable(0, true).unwrap(); frontend .set_log_base( 0, Some(VhostUserDirtyLogRegion { mmap_size: 0x1000, mmap_offset: 0, mmap_handle: eventfd.as_raw_fd(), }), ) .unwrap(); frontend.set_log_fd(eventfd.as_raw_fd()).unwrap(); frontend.set_vring_num(0, 256).unwrap(); frontend.set_vring_base(0, 0).unwrap(); let config = VringConfigData { queue_max_size: 256, queue_size: 128, flags: VhostUserVringAddrFlags::VHOST_VRING_F_LOG.bits(), desc_table_addr: 0x1000, used_ring_addr: 0x2000, avail_ring_addr: 0x3000, log_addr: Some(0x4000), }; frontend.set_vring_addr(0, &config).unwrap(); frontend.set_vring_call(0, &eventfd).unwrap(); frontend.set_vring_kick(0, &eventfd).unwrap(); frontend.set_vring_err(0, &eventfd).unwrap(); let max_mem_slots = frontend.get_max_mem_slots().unwrap(); assert_eq!(max_mem_slots, 32); let region_file: File = TempFile::new().unwrap().into_file(); let region = 
VhostUserMemoryRegionInfo::new(0x10_0000, 0x10_0000, 0, 0, region_file.as_raw_fd()); frontend.add_mem_region(®ion).unwrap(); frontend.remove_mem_region(®ion).unwrap(); mbar.wait(); } #[test] fn test_error_display() { assert_eq!(format!("{}", Error::InvalidParam), "invalid parameters"); assert_eq!( format!("{}", Error::InvalidOperation("reason")), "invalid operation: reason" ); } #[test] fn test_should_reconnect() { assert!(Error::PartialMessage.should_reconnect()); assert!(Error::BackendInternalError.should_reconnect()); assert!(Error::FrontendInternalError.should_reconnect()); assert!(!Error::InvalidParam.should_reconnect()); assert!(!Error::InvalidOperation("reason").should_reconnect()); assert!( !Error::InactiveFeature(VhostUserVirtioFeatures::PROTOCOL_FEATURES).should_reconnect() ); assert!(!Error::InactiveOperation(VhostUserProtocolFeatures::all()).should_reconnect()); assert!(!Error::InvalidMessage.should_reconnect()); assert!(!Error::IncorrectFds.should_reconnect()); assert!(!Error::OversizedMsg.should_reconnect()); assert!(!Error::FeatureMismatch.should_reconnect()); } #[test] fn test_error_from_sys_util_error() { let e: Error = vmm_sys_util::errno::Error::new(libc::EAGAIN).into(); if let Error::SocketRetry(e1) = e { assert_eq!(e1.raw_os_error().unwrap(), libc::EAGAIN); } else { panic!("invalid error code conversion!"); } } } vhost-0.10.0/src/vsock.rs000064400000000000000000000021151046102023000133260ustar 00000000000000// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved. // SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause // // Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Portions Copyright 2017 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD-Google file. //! Trait to control vhost-vsock backend drivers. 
use crate::backend::VhostBackend;
use crate::Result;

/// Trait to control vhost-vsock backend drivers.
pub trait VhostVsock: VhostBackend {
    /// Set the CID for the guest.
    /// This number is used for routing all data destined for running in the guest.
    /// Each guest on a hypervisor must have a unique CID.
    ///
    /// # Arguments
    /// * `cid` - CID to assign to the guest
    fn set_guest_cid(&self, cid: u64) -> Result<()>;

    /// Tell the VHOST driver to start performing data transfer.
    fn start(&self) -> Result<()>;

    /// Tell the VHOST driver to stop performing data transfer.
    fn stop(&self) -> Result<()>;
}