liboprf-0.6.1/.github/workflows/codeql-analysis.yml
name: "CodeQL"
on:
  push:
    branches: [master]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: [master]
  schedule:
    - cron: '0 3 * * 2'

jobs:
  analyze:
    name: Analyze
    runs-on: ubuntu-latest

    steps:
    - name: Checkout repository
      uses: actions/checkout@v4

    # Initializes the CodeQL tools for scanning.
    - name: Initialize CodeQL
      uses: github/codeql-action/init@v3
      # Override language selection by uncommenting this and choosing your languages
      # with:
      #   languages: go, javascript, csharp, python, cpp, java

    # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
    # If this step fails, then you should remove it and run the build manually (see below)
    #- name: Autobuild
    #  uses: github/codeql-action/autobuild@v1

    # ℹ️ Command-line programs to run using the OS shell.
    # 📚 https://git.io/JvXDl

    # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
    #    and modify them (or add more) to build your code if your project
    #    uses a compiled language

    - run: |
        sudo apt update
        sudo apt install -y libsodium-dev pkgconf # build-essential git
        # main liboprf
        cd src
        make test

    - name: Perform CodeQL Analysis
      uses: github/codeql-action/analyze@v3
liboprf-0.6.1/.github/workflows/python-app.yml
# This workflow will install Python dependencies, run tests and lint with a single version of Python
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
name: Python application
on:
  push:
    branches: [ "master" ]
  pull_request:
    branches: [ "master" ]

permissions:
  contents: read

jobs:
  build:
    runs-on: ubuntu-latest

    steps:
    - uses: actions/checkout@v4
    - name: Set up Python 3.10
      uses: actions/setup-python@v3
      with:
        python-version: "3.10"
    - name: Install dependencies
      run: |
        python -m pip install --upgrade pip
        pip install flake8 pytest
        sudo apt update
        sudo apt install -y libsodium-dev pkgconf # build-essential git
        pip install python/ pysodium SecureString
        cd src
        sudo PREFIX=/usr make install
        cd ..
    - name: Lint with flake8
      run: |
        # stop the build if there are Python syntax errors or undefined names
        flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
        # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
        flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
    - name: Test with pytest
      run: |
        pytest -s -v python/tests/test.py
liboprf-0.6.1/.gitignore
liboprf.a
*.o
.arch
bench
*.pdf
matrices
liboprf.so
dkg
thmult
toprf
tuokms
uokms
attack
tp-dkg
liboprf-0.6.1/LICENSE
GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.
0. Additional Definitions.
As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.
"The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.
An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.
A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".
The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.
The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.
1. Exception to Section 3 of the GNU GPL.
You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.
2. Conveying Modified Versions.
If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:
a) under this License, provided that you make a good faith effort to
ensure that, in the event an Application does not supply the
function or data, the facility still operates, and performs
whatever part of its purpose remains meaningful, or
b) under the GNU GPL, with none of the additional permissions of
this License applicable to that copy.
3. Object Code Incorporating Material from Library Header Files.
The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:
a) Give prominent notice with each copy of the object code that the
Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the object code with a copy of the GNU GPL and this license
document.
4. Combined Works.
You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:
a) Give prominent notice with each copy of the Combined Work that
the Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the Combined Work with a copy of the GNU GPL and this license
document.
c) For a Combined Work that displays copyright notices during
execution, include the copyright notice for the Library among
these notices, as well as a reference directing the user to the
copies of the GNU GPL and this license document.
d) Do one of the following:
0) Convey the Minimal Corresponding Source under the terms of this
License, and the Corresponding Application Code in a form
suitable for, and under terms that permit, the user to
recombine or relink the Application with a modified version of
the Linked Version to produce a modified Combined Work, in the
manner specified by section 6 of the GNU GPL for conveying
Corresponding Source.
1) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (a) uses at run time
a copy of the Library already present on the user's computer
system, and (b) will operate properly with a modified version
of the Library that is interface-compatible with the Linked
Version.
e) Provide Installation Information, but only if you would otherwise
be required to provide such information under section 6 of the
GNU GPL, and only to the extent that such information is
necessary to install and execute a modified version of the
Combined Work produced by recombining or relinking the
Application with a modified version of the Linked Version. (If
you use option 4d0, the Installation Information must accompany
the Minimal Corresponding Source and Corresponding Application
Code. If you use option 4d1, you must provide the Installation
Information in the manner specified by section 6 of the GNU GPL
for conveying Corresponding Source.)
5. Combined Libraries.
You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:
a) Accompany the combined library with a copy of the same work based
on the Library, uncombined with any other library facilities,
conveyed under the terms of this License.
b) Give prominent notice with the combined library that part of it
is a work based on the Library, and explaining where to find the
accompanying uncombined form of the same work.
6. Revised Versions of the GNU Lesser General Public License.
The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.
If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.
liboprf-0.6.1/README.md
# liboprf
This library implements the basic OPRF(ristretto255, SHA-512) variant
from the IRTF CFRG Draft: https://github.com/cfrg/draft-irtf-cfrg-voprf/
Additionally it implements a threshold OPRF variant based on
https://eprint.iacr.org/2017/363 by Krawczyk et al., which is
compatible with the CFRG OPRF(ristretto255, SHA-512) variant.
Furthermore it also implements the 3hashTDH from
https://eprint.iacr.org/2024/1455 "Threshold PAKE with Security
against Compromise of all Servers" by Gu, Jarecki, Kedzior, Nazarian,
Xu. This too is compatible with the CFRG OPRF(ristretto255, SHA-512)
variant.
For the threshold OPRF this library also provides a distributed
key-generation (DKG) implementation that relies on a trusted
party handling the broadcasts necessary for the DKG. This is
based on JF-DKG (fig. 1), a variant of Pedersen's DKG from
the paper "Secure Distributed Key Generation for Discrete-Log
Based Cryptosystems" by R. Gennaro, S. Jarecki, H. Krawczyk,
and T. Rabin.
In order to update a threshold OPRF instantiation this library contains
a multi-party multiplication protocol based on Fig. 2 from R. Gennaro,
M. O. Rabin, and T. Rabin, "Simplified VSS and fast-track multiparty
computations with applications to threshold cryptography" In
B. A. Coan and Y. Afek, editors, 17th ACM PODC, pages 101–111. ACM,
June / July 1998.
Additionally a python wrapper is provided, which can be installed
using `pip install pyoprf`
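
A minimal example of the base OPRF(ristretto255, SHA-512) flow using the
pyoprf bindings, with client and server collapsed into one process for
illustration:

```
import pyoprf

# server side: generate a private OPRF key
k = pyoprf.keygen()

# client side: blind the input
r, alpha = pyoprf.blind(b"my input")

# server side: evaluate the blinded element with key k
beta = pyoprf.evaluate(k, alpha)

# client side: unblind and finalize to obtain the OPRF output
out = pyoprf.unblind_finalize(r, beta, b"my input")
```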
This library depends on libsodium.
liboprf-0.6.1/docs/tp-dkg.txt
Trusted-party (TP) Distributed Key Generation (DKG)
This document specifies a proposal for a non-robust DKG that can work
for small deployments with a small number of parties and infrequent
DKG executions. Non-robust means that the protocol succeeds only if no
party aborts. If someone aborts then the protocol needs to run again,
possibly after kicking out misbehaving parties. This protocol supports
a maximum of 127 peers, which is probably already too many for a
non-robust protocol, but it might work in very special circumstances.
Broadcast is implemented by the trusted party (TP) opening a channel
to each peer secured by the peer's long-term encryption key. Every
message is routed through the TP.
Peer long-term encryption keys can be either TLS-based, or
Noise_XK-based (https://noiseexplorer.com/patterns/XK/). In the latter
case the long-term public keys must be known and validated in advance
by the TP.
The basis for this protocol is JF-DKG (fig. 1), a variant of Pedersen's
DKG from the 2006 paper "Secure Distributed Key Generation for
Discrete-Log Based Cryptosystems" by R. Gennaro, S. Jarecki,
H. Krawczyk, and T. Rabin [GJKR06]. The algorithm JF-DKG is presented
in the paper as a reduced example of how an adversary can influence the
bits in the generated secret by manipulating the complaints and thus
the final composition of the QUAL set, gaining a 3/4 chance to
influence a bit. Since our TP variant is non-robust, we do not
allow individual disqualification of peers - either all peers
qualify or the protocol fails - which mitigates the case where an
adversary can adaptively disqualify a peer. Thus JF-DKG is a
simple and sufficient algorithm for our purposes.
------<=[ Rationale ]=>-----------
Traditionally DKGs are used in setting where all parties are equal and
are using the distributed key together, without having any one party
having a different role in the protocol utilizing the shared key. This
does not translate entirely to threshold OPRFs (tOPRF) and protocols
based on these.
In an OPRF there is normally two parties, one holding the key, and
another one holding the input and learning the output. In a tOPRF the
party holding the key is a group of peers that hold shares of the key
in a threshold setting.
The whole point of OPRFs is to be able to learn the output for a
certain input, without being able to do so without the contribution of
the party/parties holding (parts of) the key. Hence the party with the
input is in a kind of trusted role, and in many protocols based on
OPRFs it is in the best interest of the input-holding party to not
learn the key (or its parts) - otherwise the input-holding party could
just deploy a PRF instead.
And if the input-holding party is in such a trusted role, there are two
options to generate a threshold shared key:

1. The trusted input-holding party just generates a secret and shares
   it with the key-holding parties using Shamir's Secret Sharing.
   This is a very simple approach with one drawback: the secret
   itself is briefly known to the input-holding TP (see the sketch
   after this list).

2. The input-holding TP can run the simple non-robust DKG specified
   below. This has the benefit that, as long as the protocol is
   followed precisely, the secret is never "assembled" and thus cannot
   leak, and is never exposed to the TP. The drawback is that
   the protocol below consists of many rounds of communication.
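
As an illustration of option 1, here is a minimal sketch using the
pyoprf bindings bundled with this library; the parameters are only
examples. Note that the secret necessarily exists in the TP's memory
before it is shared - exactly the drawback described above:

```
import pyoprf

# option 1: the TP generates the secret itself and Shamir-shares it
k = pyoprf.keygen()                     # random ristretto255 scalar
shares = pyoprf.create_shares(k, 5, 3)  # n=5 shares with threshold t=3
# distribute shares[i] to keyshare-holder i over a confidential channel
```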
The protocol in this document allows for a variant where each
keyshare-holder generates a completely new set of ephemeral
(encryption and signature) keys, and thus keeps the keyshare-holders
completely anonymous from each other, while only the TP is
aware of the identities of the keyshare-holders (by knowing
their long-term signature and encryption keys). This increases the
security of the whole scheme, as an attacker compromising one
keyshare-holder will not be able to learn the identity of the other
parties - and more importantly the location of the other keyshares. If
this keyshare-holder anonymity is not necessary, steps 3, 4 and the
first half of step 5 in the following protocol can be skipped.
------<=[ Protocol Phases ]=>-----------
The protocol has the following phases:
1. Initialization and introduction (step 1 - 5)
2. Setup secure P2P channels (step 5 - 10)
3. core DKG (step 11 - 17)
4. Finish with failure: complaint resolution (only if there are
complaints) (step 17 - 19)
5. Finish with success: verification of transcript and completion of
protocol (step 20 - 22)
------<=[ Simplified API ]=>-----------
Since the protocol consists of many steps, it is recommended to
abstract the API to the following schema:
0. Initialize
While not done and not error:
1. Allocate input buffers
2. input = receive()
3. allocate output buffer
4. run next step of protocol
5. if there is output: send(output)
6. Post-processing
This simple schema reduces the burden on an implementer using this
protocol, reduces opportunities for errors, and keeps the protocol's
security checks strict.
The reference implementation in tp-dkg.c follows this schema for both
the TP and the peers.
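
As an illustration, here is a condensed peer-side driver loop following
this schema, written in Python against the pyoprf bindings from this
repository (adapted from python/examples/tpdkg_test.py; the recv/send
callables are assumed stand-ins for whatever transport connects the
peer to the TP):

```
import pyoprf

def run_peer(ts_epsilon, peer_lt_sk, msg0, recv, send):
    # 0. initialize with the TP's first broadcast message
    peer = pyoprf.tpdkg_peer_start(ts_epsilon, peer_lt_sk, msg0)
    try:
        while pyoprf.tpdkg_peer_not_done(peer):
            msg = recv()                             # 1+2. receive input (may be empty)
            out = pyoprf.tpdkg_peer_next(peer, msg)  # 3+4. run the next protocol step
            if len(out) > 0:
                send(out)                            # 5. send output, if any
        # 6. post-processing: return the resulting share for persisting
        return pyoprf.tpdkg_peerstate_share(peer)
    finally:
        pyoprf.tpdkg_peer_free(peer)                 # clean up allocated buffers
```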
------<=[ Protocol transcript ]=>-----------
Transcript - all broadcast messages are accumulated into a transcript
by each peer and the trusted party. At the end of the protocol all
parties publish their signed transcripts, and the protocol is successful
only if all signatures are correct and the transcripts match.
The transcript is a hash that is initialized with the string:
"tp dkg session transcript"
in pseudo-code:
transcript_state = hash_init("tp dkg session transcript")
Updating the transcript first updates the hash with the canonical
32bit size of the message to be added to the transcript, then the
message itself is added to the hash.
transcript_state = hash_update(transcript_state, I2OSP(len(msg)))
transcript_state = hash_update(transcript_state, msg)
The signature of each message is similarly added to the transcript.
A function `update_ts` can be used as a high-level interface to
updating the transcript with messages and their signatures:
```
update_ts(state, msg, sig)
  state = hash_update(state, I2OSP(len(msg)))
  state = hash_update(state, msg)
  state = hash_update(state, I2OSP(len(sig)))
  state = hash_update(state, sig)
  return state
```
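
In Python this could look as follows. This is only a sketch: the
concrete hash function is an assumption here (BLAKE2b via pysodium's
generichash), and I2OSP(len(x)) is rendered as a 32-bit big-endian
length prefix:

```
import struct
import pysodium

# initialize the transcript hash with the fixed string
state = pysodium.crypto_generichash_init()
pysodium.crypto_generichash_update(state, b"tp dkg session transcript")

def update_ts(state, msg, sig):
    for part in (msg, sig):
        pysodium.crypto_generichash_update(state, struct.pack(">I", len(part)))
        pysodium.crypto_generichash_update(state, part)
    return state
```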
------<=[ Session id ]=>-----------
Every execution of the protocol starts by the TP sending out a message
with a unique and fresh session id; this ensures that no messages
can be replayed. The session id is a 256 bit (32B) random value of
cryptographic quality.
------<=[ Message header ]=>-----------
All messages have a message header:
uint8 messageno
uint32 len
uint8 from
uint8 to
uint64 timestamp
uint8 sessionid[32]
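
For illustration, the header could be parsed as follows. The field
order matches the list above, while the byte order and exact packing
are assumptions of this sketch - the authoritative wire format is
defined by the reference implementation in tp-dkg.c/tp-dkg.h:

```
import struct

# messageno (u8), len (u32), from (u8), to (u8), timestamp (u64), sessionid (32 bytes)
HEADER_FMT = ">BIBBQ32s"
HEADER_LEN = struct.calcsize(HEADER_FMT)  # 47 bytes with this packing

def parse_header(pkt: bytes) -> dict:
    msgno, length, frm, to, ts, sessionid = struct.unpack_from(HEADER_FMT, pkt)
    return {"messageno": msgno, "len": length, "from": frm,
            "to": to, "timestamp": ts, "sessionid": sessionid}
```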
The first field in the header is really a state identifier. A
recipient MUST verify that the messageno is matching with the expected
number related to the state of the protocol.
The len field MUST be equal to the size of the packet received on the
network including the packet header.
The `from` field is simply the index of the peer; since peers are
indexed starting from 1, the value 0 is used for the trusted
party. Any value greater than 128 is invalid. The state defines from
whom to receive messages, and thus the from field MUST be validated
against these expectations.
The `to` field is similar to the `from` field, with the difference
that the value 0xff is reserved for broadcast messages. The peer (or
TP) MUST validate that it is indeed the recipient of a given message.
The timestamp field is just a 64-bit timestamp in seconds elapsed since
1970/01/01. For peers that have no accurate clock themselves but do
have an RTC, the first initiating message from the TP SHOULD be used
as a reference for synchronizing during the protocol.
------<=[ Message signatures ]=>-----------
Every message MUST be signed using the sender peer's ephemeral signing
key. The signature is made over the message and the appended session
id. The session id is announced by the TP in the first message.
------<=[ Verifying messages ]=>-----------
Whenever a message is received by any participant, they first MUST
check the correctness of the signature:
```
msg, sig = recv()
sign_pk = sign_keys[expected_sender_id]
assert(verify(sign_pk, msg, sig))
```
The recipient MUST also assert the correctness of all the other header
fields:
```
assert(msg.messageno == expected_messageno)
assert(msg.from == expected_sender_id)
assert(msg.to == (own_peer_id or 0xff))
assert(ref_ts <= msg.ts < ref_ts + timeout)
ref_ts = msg.ts
```
The value `timeout` should be configurable and be set to the smallest
value that doesn't cause protocol aborts due to slow responses.
If at any step of the protocol the TP receives one or more messages
that fail these checks, the TP MUST abort the protocol and report all
violating peers to the user.
------<=[ Message transmission ]=>-----------
A higher level message transmission interface can be provided, for
sending:
```
msg, sig = send_msg(msgno, from, to, sign_sk, session_id, data)
  ts = timestamp()
  msg = messageno: msgno, len: len(header) + len(data) + len(sig), from: from, to: to, ts: ts, data
  sig = sign(sign_sk, msg)
  return msg, sig
```
And for validating incoming messages:
```
data = recv_msg(msgno, from, to, ref_ts, sign_pk, session_id, msg, sig)
  assert(verify(sign_pk, msg, sig))
  assert(msg.messageno == msgno)
  assert(msg.len == len(msg|sig))
  assert(msg.from == from)
  assert(msg.to == to)
  assert(ref_ts < msg.ts < ref_ts + timeout)
  if msg.to == 0xff:
    update_ts(state,msg,sig)
```
The parameters `msgno`, `from`, `to`, `session_id` should be the
values expected according to the current protocol state.
------<=[ Cheater detection ]=>-----------
The TP MUST report to the user all errors that can identify cheating
peers in a given step. For each detected cheating peer the TP MUST
record the following information:
- the current protocol step,
- the violating peer,
- the other peer involved, and
- the type of violation
In order to detect other misbehaving peers in the current step,
processing for the rest of the peers SHOULD continue until the end of
the current step. Any further violations should be recorded as above.
Before the next message to the peers is sent, the TP must
check whether any violations have been noted; if so, the TP aborts and
reports all violators with their parameters to the user.
Abort conditions include any errors detected by recv_msg(), or when
the number of complaints is more than t for one peer, or more than t^2
in total, as well as any failed check of the JF-DKG algorithm from
GJKR06.
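
A record for a detected violation could therefore look like the
following sketch; the names are illustrative only, the reference
implementation keeps an equivalent array of cheater entries in the TP
state:

```
from dataclasses import dataclass

@dataclass
class CheaterRecord:
    step: int        # protocol step in which the violation was detected
    peer: int        # index of the violating peer
    other_peer: int  # index of the other peer involved, if any
    violation: str   # type of violation, e.g. "invalid signature", "share mismatch"
```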
------<=[ The protocol ]=>-----------
------<=[ 0. Precondition ]=>-----------
Peers use TLS or TP knows long-term encryption keys for all peers.
Client knows long-term signing keys of all peers.
------<=[ 1. DKG Announcement - TP(peers, t, proto_name) ]=>----------
The protocol starts by asking the trusted party (TP) to initiate a new
run of the DKG protocol by providing it with:
- a list of the peers,
- a threshold value, and
- protocol instance name used as a domain separation token.
The TP then sanity checks these parameters:
```
n = len(peers)
assert(2 <= t < n)
assert(len(proto_name) > 0)
```
The TP then generates a fresh session id, and a hash of the DST.
The TP then creates a broadcast message containing the session id, a
hash (so that the message is always of fixed size) of the DST,
the values N and T and its own public signing key:
```
dst_str = "TP DKG for protocol %s" % proto_name
dst = hash(I2OSP(len(dst_str)) | dst_str)
sessionid = random_bytes(32)
data = {dst, n, t, tp_sign_pk}
msg_0, sig_0 = send_msg(0, 0, 0xff, tp_sign_sk, session_id, data)
broadcast(msg_0 | sig_0)
```
The TP's copy of the transcript is initialized by the TP, and updated
with the value of the 1st broadcast message:
```
state = hash_init("tp dkg session transcript")
state = update_ts(state, msg, sig)
```
Since the order of the peers is random, and important for the protocol,
a custom message is created for each peer by the TP and sent
individually notifying each peer of their index in this protocol
run. This is essentially an empty message consisting only of a
header. The msg.to field conveys the index of the peer.
```
# sending each peer its index
for i in 1..n:
  msg_1, sig_1 = send_msg(1, 0, i, tp_sign_sk, session_id, {})
  send(i, msg_1 | sig_1)
```
------<=[ 2. each peer(msg_0, sig_0) ]=>------------
In this step each peer receives the initial parameter broadcast,
verifies it, initializes the transcript and adds the initial
message. Then receives the message assigning its index.
```
msg_0, sig_0 = recv()
assert(recv_msg(0, 0, 0xff, ref_ts, msg.data.tp_sign_pk, session_id, msg_0, sig_0))
```
If the peer has no accurate internal clock but has at least an RTC, it
SHOULD set the ref_ts to the message timestamp:
```
ref_ts = msg_0.ts
```
Furthermore the peer MUST also verify that the N&T parameters are
sane, and if possible the peer SHOULD also check if the session id is
fresh (if that is not possible, isfresh() MAY always return true).
```
assert(2 <= msg_0.t < n)
assert(isfresh(msg_0,sessionid))
```
The transcript MUST be initialized by the peer, and updated with the
value of the 1st broadcast message:
```
state = hash_init("tp dkg session transcript")
state = update_ts(state, msg, sig)
```
After processing the broadcast message from the TP, the peers also
have to process the second message from the TP in which they are
assigned their index.
```
msg_1, sig_1 = recv()
assert(recv_msg(1, 0, msg_1.to, ref_ts, tp_sign_pk, session_id, msg_1, sig_1))
assert(msg_1.to <= 128 and msg_1.to > 0)
peerid = msg_1.to
```
------<=[ 3. peers broadcast their keys via TP ]=>-------------
If this protocol requires anonymity from each peer all peers broadcast
fresh signing and noise keys to all peers via the TP. If no
peer-anonymity is required it is OK to either send long-term keys
here, or skip to the 2nd half of step 5 below.
In order to assure the TP that the peer is authentic, this message is
additionally signed by the peers long-term signing key - which must be
known in advance by the TP. This ensures that the fresh ephemeral keys
belong to the peer and not some adversary.
```
peer_sign_sk, peer_sign_pk = sign_genkey()
peer_noise_sk, peer_noise_pk = noise_genkey()
msg_2, sig_2 = send_msg(2, peerid, 0xff, peer_sign_sk, session_id, {peer_sign_pk, peer_noise_pk})
ltsig = sign(peer_long_term_sig_sk, msg_2|sig_2)
broadcast(ltsig | msg_2 | sig_2 )
```
------<=[ 4. TP collects and broadcasts all peer keys ]=>-------------
The TP first checks if each of the received messages is signed by the
expected long-term signing key, if this fails the TP aborts. If all
long-term signatures are correct the TP MUST strip those signatures
from all the messages. This is to ensure their anonymity from each
other.
Then the TP acts as a broadcast medium on the long-term
signature-stripped messages.
This is a recurring pattern where the TP acts in its broadcasting
intermediary role:
1. receives the messages from each peer
2. validates the message using recv_msg()
3. extracts all signing pubkeys (or other information depending on
the current step) for usage by the TP in the rest of the protocol
4. concatenates all received messages into a new message
5. signs the message of messages
6. adds this message of messages and its signature to the transcript
7. sends it to all peers
```
peer_sig_pks = []
msgs = []
for i in 1..N
  ltsig, msg_2, sig_2 = recv()
  assert(verify(lt_sign_pk[i], msg_2 | sig_2, ltsig))
  sig_pk, noise_pk = recv_msg(2, i, 0xff, ref_ts, msg_2.data.peer_sign_pk, session_id, msg_2, sig_2)
  peer_sig_pks[i] = sig_pk
  msgs = msgs | { msg_2 , sig_2 }
msg_3, sig_3 = send_msg(3, 0, 0xff, tp_sign_sk, session_id, msgs)
state = update_ts(state, msg_3, sig_3)
broadcast(msg_3|sig_3)
```
------<=[ 5. each peer get all keys and initiate noise channels with all peers ]=>-------
In this phase all peers process the broadcast signing and noise keys
received from all peers, and initiate a noise_xk handshake with each
of them (including themselves for simplicity and thus security).
Note: for performance, each peer MAY initiate handshakes only with
peers having a higher index than itself. But this
would create a packet-size and timing side-channel revealing the index
of the peer.
```
msg_3, sig_3 = recv()
msgs = recv_msg(3, 0, 0xff, ref_ts, tp_sign_pk, session_id, msg_3, sig_3)
state = update_ts(state, msg_3, sig_3)
peers_sign_pks = []
peers_noise_pks = []
send_session = []
for i in 1..N
  msg, sig = msgs[i]
  peers_sign_pks[i], peers_noise_pks[i] = recv_msg(2, i, 0xff, ref_ts, msg.peer_sign_pk, session_id, msg, sig)
  send_session[i], handshake1 = noisexk_initiator_session(peer_noise_sk, peers_noise_pks[i])
  msg, sig = send_msg(4,peerid,i,peer_sign_sk, session_id, handshake1)
  send(msg | sig)
```
------<=[ 6. TP routes handshakes from each peer to each peer ]=>-------
The TP receives all 1st handshake messages from all peers and routes
them correctly to their destination. These messages are not broadcast,
each of them is a P2P message. The benefit of the TP forming a star
topology here is, that the peers can be on very different physical
networks (wifi, lora, uart, nfc, bluetooth, etc) and only the TP needs
to be able to connect to all of them.
```
for i in 1..N
  handshakes = recv(i)
  for j in 1..N
    send(j, handshakes[j])
```
------<=[ 7. each peer responds to each handshake from each peer ]=>-------
Peer receives noise handshake1 from each peer and responds with
handshake2 answer to each peer.
```
for i in 1..N
  msg, sig = recv()
  handshake1 = recv_msg(4, i, peerid, ref_ts, peers_sign_pks[i], session_id, msg, sig)
  receive_session[i], handshake2 = noisexk_responder_session(peer_noise_sk, handshake1)
  msg, sig = send_msg(5, peerid, i, peer_sign_sk, session_id, handshake2)
  send(msg | sig)
```
------<=[ 8. TP routes handshakes from each peer to each peer ]=>-------
TP just routes all P2P messages from all peers to the correct
recipients of the messages.
```
for i in 1..N
  handshakes = recv(i)
  for j in 1..N
    send(j, handshakes[j])
```
------<=[ 9. each peer completes each handshake with each peer ]=>-------
Peers complete the noise handshake.
```
for i in 1..N
  msg, sig = recv()
  handshake3 = recv_msg(5, i, peerid, ref_ts, peers_sign_pks[i], session_id, msg, sig)
  send_session[i] = noisexk_initiator_session_complete(send_session[i], handshake3)
```
------<=[ 10. Setup complete ]=>-------
Each peer has a confidential connection with every peer (including self, for simplicity)
The one time this channel is used is when distributing the shares in
step 13. The sender uses the initiator interface of the noise session,
and the receiver uses the responder interface.
------<=[ 11. each peer executes DKG Round 1 ]=>-------
This step is as described by GJKR06 (fig 1. JF-DKG) step 1: Each party
P_i (as a dealer) chooses a random polynomial f_i(z) over Z_q of degree t:
f_i(z) = a_(i0) + a_(i1)z + ··· + a_(it)z^t
P_i broadcasts A_ik = g^(a_ik) mod p for k = 0,... ,t.
Each P_i computes the shares s_ij = f_i(j) mod q for j = 1, ... ,n.
```
a = []
A = []
for i in 0..t
  a[i]=randombytes(32)
  A[i]=g*a[i]
s = []
for i in 1..N
  for j in 0..t
    s[i]+=a[j]*i^j
msg_6, sig_6 = send_msg(6, peerid, 0xff, peer_sign_sk, session_id, A)
send(msg_6 | sig_6)
```
------<=[ 12. TP collects and broadcasts all A vectors ]=>-------
This is another broadcast pattern instance:
receive-verify-collect-sign-transcript-broadcast. The TP keeps a copy
of all commitments being broadcast.
```
A = [][]
msgs = []
for i in 1..N
  msg_6, sig_6 = recv(i)
  A[i] = recv_msg(6, i, 0xff, ref_ts, peer_sign_pks[i], session_id, msg_6, sig_6)
  msgs = msgs | { msg_6 , sig_6 }
msg_7, sig_7 = send_msg(7, 0, 0xff, tp_sign_sk, session_id, msgs)
state = update_ts(state, msg_7, sig_7)
broadcast(msg_7|sig_7)
```
------<=[ 13. each peer collects all A vectors and distributes their generated shares ]=>-------
All peers receive the bundled A commitment messages which have been
sent by all peers and re-broadcast by the TP. First the bundle is
verified, then each message containing the j-th A commitment vector is
also verified. A copy of all A commitment vectors is retained for
later usage. Then the share for the j-th peer is sent using the
previously established noise channel to the j-th peer. These shares
have been already computed in step 11, as per the step 1 of the JF-DKG
algorithm from the GJKR06 paper.
```
msg_7, sig_7 = recv()
msgs = recv_msg(7, 0, 0xff, ref_ts, tp_sign_pk, session_id, msg_7, sig_7)
state = update_ts(state, msg_7, sig_7)
A=[][]
for i in 1..N
  msg, sig = msgs[i]
  A[i] = recv_msg(6, i, 0xff, ref_ts, peer_sign_pks[i], session_id, msg, sig)
  pkt = noise_send(send_session[i], s[i])
  msg, sig = send_msg(8,peerid,i,peer_sign_sk, session_id, pkt)
  send(msg | sig)
```
------<=[ 14. TP routes noise protected messages between peers ]=>-------
Since all these messages are confidential P2P messages protected by
noise, all the TP is doing in this step is routing each packet to its
correct destination. For the resolution of complaints and cheater
identification, TP keeps a copy of all messages.
```
encrypted_shares = [][]
for i in 1..N
  for j in 1..N
    msg = recv(i)
    send(j, msg)
    encrypted_shares[i][j] = msg
```
------<=[ 15. each peer executes DKG Round 2 ]=>-------
Each peer having received all their shares from all the peers,
verifies the messages, and then verifies the shares against the
previously broadcast A commitment vectors. For each s_ij, A_i pair
that fails, a complaint against the peer producing the conflicting
commitment and share is logged in an array, which is broadcast to
everyone. This is essentially step 2 from the JF-DKG algorithm
described in GJKR06.
```
s=[]
for i in 1..N
  msg, sig = recv()
  pkt = recv_msg(8, i, peerid, ref_ts, peer_sign_pks[i], session_id, msg, sig)
  s[i] = noise_recv(receive_session[i], pkt)
complaints = []
for i in 1..N
  v = 0
  for k in 0..t
    v += A[i][k]*peerid^k
  if (g*s[i] != v)
    complaints = complaints | i
msg, sig = send_msg(9, peerid, 0xff, peer_sign_sk, session_id, len(complaints) | complaints)
send(msg | sig)
```
------<=[ 16. TP collects complaints ]=>-------
Another receive-verify-collect-sign-transcribe-broadcast
instantiation. The TP keeps a copy of all complaints for the 18th
step.
If any peer complains about more than t peers, that complaining peer
is a cheater, and must be disqualified. Furthermore if there are in
total more than t^2 complaints there are multiple cheaters and the
protocol must be aborted and new peers must be chosen in case a rerun
is initiated.
```
complaints = []
msgs = []
for i in 1..N
  msg_9, sig_9 = recv(i)
  complaints_i = recv_msg(9, i, 0xff, ref_ts, peer_sign_pks[i], session_id, msg_9, sig_9)
  assert(len(complaints_i) < t)
  complaints = complaints | complaints_i
  msgs = msgs | { msg_9 , sig_9 }
assert(len(complaints) < t^2)
msg_10, sig_10 = send_msg(10, 0, 0xff, tp_sign_sk, session_id, msgs)
state = update_ts(state, msg_10, sig_10)
broadcast(msg_10|sig_10)
```
The next step of the protocol depends on the number of complaints
received: if there are none then the next step is 21, otherwise 18.
If the next TP step is 18 (there are complaints) the next input buffer
size depends on the number of complaints against each peer.
Each complaint is answered with the symmetric encryption key used to
encrypt the share the accused sent to the complainer. Each
accused peer packs all its answers into one message.
------<=[ 17. Each peer receives all complaints ]=>-------
All complaint messages broadcast are received by each peer. If peer_i
is being complained about by peer_j, peer_i sends the symmetric
encryption key that was used to encrypt s_ij to the TP. This is the
first part of step 3. in JF-DKG of GJKR06. There is a slight
variation, instead of broadcasting the share, the accused peer reveals
the symmetric encryption key that was used to encrypt the share. The
TP has a copy of this encrypted message, and with the symmetric
encryption key, it can decrypt the originally sent share. This is a
kind of poor man's provable encryption.
If any complaints have been lodged by any peer the protocol ends here
for all the peers.
```
msg_10, sig_10 = recv()
msgs = recv_msg(10, 0, 0xff, ref_ts, tp_sign_pk, session_id, msg_10, sig_10)
state = update_ts(state, msg_10, sig_10)
keys = []
for i in 1..N
  msg, sig = msgs[i]
  complaints_len, complaints = recv_msg(9, i, 0xff, ref_ts, peers_sign_pks[i], session_id, msg, sig)
  for k in 0..complaints_len
    if complaints[k] == peerid
      # complaint about current peer, publish key used to encrypt s_ij
      keys = keys | send_session[i].key
if len(keys) > 0
  msg_11, sig_11 = send_msg(11, peerid, 0x0, peer_sign_sk, session_id, keys)
  send(msg_11, sig_11)
```
------<=[ 18. TP collects all s_ij, broadcasts and verifies them ]=>-------
In this step TP checks equation 3 from step 2 in JF-DKG of GJKR06.
TP also checks if all complaints lodged earlier are answered by the
correct s_ij shares. The shares to be verified are decrypted from the
previously encrypted messages, using the revealed encryption keys by
the accused peers.
The protocol ends here, as either the complainer or the accused tried
to cheat.
```
for i in 1..N
  if len(complaints[i]) < 1
    continue
  msg, sig = recv(i)
  keys = recv_msg(11, i, 0xff, ref_ts, peers_sign_pks[i], session_id, msg, sig)
  assert(len(keys) == len(complaints[i]))
  sij=[][]
  for j, key in zip(complaints[i], keys)
    sij[i][j]=decrypt(key, encrypted_shares[i][j])
  for j in complaints[i]
    v = 0
    for k in 0..t
      v += A[i][k]*j^k
    if(g*sij[i][j] != v)
      suspicious = suspicious | identity(i)
    else
      suspicious = suspicious | identity(j)
```
------<=[ 19. Compare all transcripts ]=>-------
Each peer calculates the final transcript and sends it to the TP.
```
transcript = final_ts(state)
msg_20, sig_20 = send_msg(20, peerid, 0, peer_sign_sk, session_id, transcript)
send(msg_20, sig_20)
```
------<=[ 20. TP receives all and verifies transcripts ]=>-------
TP receives all transcripts and asserts that they all match its own
transcript; it aborts if any transcript mismatch is detected. If
everything matches it broadcasts the result as an OK message.
```
transcript = final_ts(state)
for i in 1..N
  msg, sig = recv(i)
  ts = recv_msg(20, i, 0xff, ref_ts, peers_sign_pks[i], session_id, msg, sig)
  assert( ts == transcript)
msg_21, sig_21 = send_msg(21, 0, 0xff, tp_sign_sk, session_id, { "OK" })
broadcast(msg_21|sig_21)
```
------<=[ 21. SUCCESS, peers set their share and confirm ]=>-------
All peers receive the OK acknowledgment from the TP and calculate the
final share; this is equivalent to the calculation of x_j in
step 4 of JF-DKG in GJKR06. Finally all peers acknowledge this step
with another "OK" message sent to the TP. This is the final step for
the peers, each needs to persist the calculated x_j share for usage in
later threshold protocol runs (such as tOPRF).
```
msg_21, sig_21 = recv()
recv_msg(21, 0, 0xff, ref_ts, tp_sign_pk, session_id, msg_21, sig_21)
share = 0
for i in 1..N
  share += s[i]
msg_22, sig_22 = send_msg(22, peerid, 0, peer_sign_sk, session_id, "OK")
send(msg_22 | sig_22)
persist(own_peer_id, share)
```
------<=[ 22. TP asserts all peers respond with "OK" ]=>-------
The TP collects all "OK" messages from all peers.
```
for i in 1..N
  msg, sig = recv(i)
  ok = recv_msg(22, i, 0, ref_ts, peers_sign_pks[i], session_id, msg, sig)
  assert( ok == "OK")
```
This successfully concludes the protocol.
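
As a companion to the peer-side sketch in the Simplified API section
above, here is a condensed TP-side driver loop using the pyoprf
bindings (adapted from python/examples/tpdkg_test.py in this
repository; recv_from/send_to are assumed transport stand-ins, and
error and cheater handling is omitted):

```
import pyoprf

def run_tp(n, t, ts_epsilon, proto_name, peer_lt_pks, recv_from, send_to):
    # step 1: initialize the TP; msg0 is the first broadcast message
    tp, msg0 = pyoprf.tpdkg_start_tp(n, t, ts_epsilon, proto_name, peer_lt_pks)
    for i in range(n):
        send_to(i, msg0)
    while pyoprf.tpdkg_tp_not_done(tp):
        ret, sizes = pyoprf.tpdkg_tp_input_sizes(tp)
        # gather the inputs expected for this step (none on the first iteration)
        msgs = b''.join(recv_from(i, sizes[i]) for i in range(n) if sizes[i] > 0)
        tp_out = pyoprf.tpdkg_tp_next(tp, msgs)
        if len(tp_out) > 0:
            # route this step's output to each peer
            for i in range(n):
                send_to(i, pyoprf.tpdkg_tp_peer_msg(tp, tp_out, i))
```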
liboprf-0.6.1/docs/tp-update.txt
Trusted-Party (TP) threshold OPRF key update Protocol
This document specifies a proposal for a non-robust threshold OPRF key
update protocol that can work for small deployments with a small
number of parties and infrequent DKG executions. Non-robust means that
the protocol succeeds only if no party aborts. If someone aborts then
the protocol needs to run again, possibly after kicking out
misbehaving parties. This protocol supports a maximum of 127
peers, which is probably already too many for a non-robust protocol,
but it might work in very special circumstances.
Broadcast is implemented by the trusted party (TP) opening a channel
to each peer secured by the peer's long-term encryption key. Every
message is routed through the TP.
Peer long-term encryption keys can be either TLS-based, or
Noise_XK-based (https://noiseexplorer.com/patterns/XK/). In the latter
case the long-term public keys must be known and validated in advance
by the TP.
The basis for this protocol is the TP-DKG protocol as specified at
https://github.com/stef/liboprf/blob/master/docs/tp-dkg.txt, and a
Distributed Multiplication protocol which given the sharings of secret
a and secret b generates a sharing of the product a·b without learning
anything about either secret.
The multi-party multiplication is based on Fig. 2 from R. Gennaro,
M. O. Rabin, and T. Rabin. "Simplified VSS and fact-track multiparty
computations with applications to threshold cryptography" In
B. A. Coan and Y. Afek, editors, 17th ACM PODC, pages 101–111. ACM,
June / July 1998.
0. TP ensures that n >= 2t+1, otherwise abort.
1. execute TP-DKG for all dealers, if DKG fails, abort
2. dealers (exactly 2t+1 peers) run multiparty multiplication step
one, send the results to the corresponding peers
3. TP pre-computes the inverted Vandermonde matrix, and broadcasts the first row.
4. all peers run multiparty multiplication step 2 on inputs from TP and the dealers.
5. all dealers report their shares generated during the DKG, to calculate delta
liboprf-0.6.1/misc/attack.c
// # SPDX-FileCopyrightText: 2024, Marsiske Stefan
// # SPDX-License-Identifier: GPL-3.0-or-later
// build with
// $ gcc -Wall -O3 attack.c -o attack -loprf -lsodium
// then run:
// $ ./attack test
#include <string.h> // memcmp
#include <stdio.h>  // f?printf
#include <stdint.h> // uint8_t
#include <stdarg.h> // va_list, va_start, va_end
#include <sodium.h>
#include <oprf/oprf.h> // liboprf; header path may differ depending on install prefix
static const uint8_t k[crypto_core_ristretto255_SCALARBYTES] = {1};
void dump(const uint8_t *p, const size_t len, const char* msg, ...) {
va_list args;
va_start(args, msg);
vfprintf(stderr,msg, args);
va_end(args);
fprintf(stderr,"\t");
for(size_t i=0;i<len;i++) fprintf(stderr,"%02x", p[i]);
fprintf(stderr,"\n");
}
static int usage(const char* exec, const int ret) {
printf("usage: %s test\n", exec);
printf("usage: cat alpha | %s tamper >beta\n", exec);
printf("usage: cat rwd | %s guess password\n", exec);
return ret;
}
static int tamper(const uint8_t alpha[crypto_core_ristretto255_BYTES],
uint8_t beta[crypto_core_ristretto255_BYTES]) {
puts("tampering");
dump(k, sizeof k, "k");
if(0!=crypto_scalarmult_ristretto255(beta, k, alpha)) {
fputs("failed to tamper with k\nabort.\n", stderr);
return 1;
}
return 0;
}
static int guess(uint8_t rwd[OPRF_BYTES], const uint8_t *pwd, const size_t pwd_len) {
//fputs("[1] hashing to group...", stdout);
uint8_t h0pwd[crypto_core_ristretto255_BYTES]={0};
if(0!=voprf_hash_to_group(pwd, pwd_len, h0pwd)) {
fputs("failed to hash to group\nabort\n", stderr);
return 1;
}
// tamper(h0pwd, h0pwd)
uint8_t rwd_[OPRF_BYTES];
if(0!=oprf_Finalize(pwd, pwd_len, h0pwd, rwd_)) {
fputs("failed to finalize OPRF\nabort\n", stderr);
return 1;
}
if(memcmp(rwd,rwd_, OPRF_BYTES)!=0) return -1;
return 0;
}
static int test(void) {
// regular OPRF flow on the client
const uint8_t password[] = "Exploitability of this is low, OPRFs are still cool";
uint8_t alpha[crypto_core_ristretto255_BYTES]={0};
uint8_t r[crypto_core_ristretto255_SCALARBYTES]={0};
if(0!=oprf_Blind(password, sizeof password, r, alpha)) {
fputs("failed to blind password\nabort\n", stderr);
return 1;
}
//dump(r, sizeof r, "r");
// we tamper with beta
uint8_t beta[crypto_core_ristretto255_BYTES]={0};
dump(alpha, sizeof alpha, "alpha");
tamper(alpha, beta);
dump(beta, sizeof beta, "beta");
// regular OPRF flow on the client
uint8_t N[crypto_core_ristretto255_BYTES]={0};
int x = oprf_Unblind(r, beta, N);
if(0!=x) {
fputs("failed to unblind beta\nabort\n", stderr);
return 1;
}
uint8_t rwd[OPRF_BYTES];
if(0!=oprf_Finalize(password, sizeof password, N, rwd)) {
fputs("failed to finalize OPRF\nabort\n", stderr);
return 1;
}
// we "intercept" the oprf output and guess candidate inputs
fprintf(stderr, "guess(\"%s\") = %d\n", password, guess(rwd, password, sizeof password-1));
fprintf(stderr, "guess(\"%s\") = %d\n", password, guess(rwd, password, sizeof password));
return 0;
}
int main(const int argc, const char** argv) {
if(argc<2) {
return usage(argv[0], 0);
}
if(memcmp(argv[1],"tamper",7)==0) {
uint8_t alpha[crypto_core_ristretto255_BYTES];
if(fread(alpha, 1, 32, stdin) != 32) {
fputs("failed to read point\nabort.\n", stderr);
return 1;
}
uint8_t beta[crypto_core_ristretto255_BYTES];
if(0!=tamper(alpha, beta)) {
return 1;
};
fwrite(beta, 1, sizeof beta, stdout);
return 0;
}
if(memcmp(argv[1],"guess",6)==0) {
if(argc<3) {
return usage(argv[0], 1);
}
uint8_t rwd[OPRF_BYTES];
if(fread(rwd, 1, OPRF_BYTES, stdin) != OPRF_BYTES) {
fputs("failed to read rwd\nabort.\n", stderr);
return 1;
}
return guess(rwd, (uint8_t*) argv[2], strlen(argv[2]));
}
if(memcmp(argv[1],"test",5)==0) {
return test();
}
return usage(argv[0], 1);
}
liboprf-0.6.1/python/.gitignore
pyoprf.egg-info
pyoprf/__pycache__
build
dist
liboprf-0.6.1/python/MANIFEST.in
include README.md
include *.py
liboprf-0.6.1/python/README.md
# pyoprf
These are the python bindings for liboprf.
## installation
you'll need https://github.com/stef/liboprf/
which depends on libsodium.
a simple `pip install pyoprf` should suffice to install the bindings.
## usage
see the file `test.py`
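
a short example of the threshold flow, condensed from
`examples/3hashtdh.py` (the input and parameters are illustrative):

```
import pyoprf

k = pyoprf.keygen()
shares = pyoprf.create_shares(k, 5, 3)   # 5 shares, threshold 3

r, alpha = pyoprf.blind(b"test")
# each shareholder evaluates with its share; keep the index byte prefix
betas = [s[:1] + pyoprf.evaluate(s[1:], alpha) for s in shares[:3]]
beta = pyoprf.thresholdmult(betas)
out = pyoprf.unblind(r, beta)
```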
## License
LGPLv3.0+
liboprf-0.6.1/python/examples/3hashtdh.py
#!/usr/bin/env python
from pyoprf import keygen, create_shares, blind, evaluate, unblind, thresholdmult
from pysodium import randombytes, crypto_core_ristretto255_from_hash, crypto_generichash, crypto_core_ristretto255_add
k = keygen()
shares = create_shares(k, 5, 3)
zero_shares = create_shares(bytes([0]*32), 5, 3)
r, alpha = blind(b"test")
ssid_S = randombytes(32)
betas = []
for ki, zi in zip(shares,zero_shares):
    h2 = evaluate(
        zi[1:],
        crypto_core_ristretto255_from_hash(crypto_generichash(ssid_S + alpha, outlen=64)),
    )
    beta = evaluate(ki[1:], alpha)
    betas.append(ki[:1]+crypto_core_ristretto255_add(beta, h2))
# normal 2hashdh(k,"test")
beta = evaluate(k, alpha)
Nt0 = unblind(r, beta)
print(Nt0)
beta = thresholdmult(betas[:3])
Nt1 = unblind(r, beta)
print(Nt1)
assert Nt0 == Nt1
liboprf-0.6.1/python/examples/tpdkg_test.py
#!/usr/bin/env python
"""
Test for TP DKG wrapper of pyoprf/liboprf
SPDX-FileCopyrightText: 2024, Marsiske Stefan
SPDX-License-Identifier: LGPL-3.0-or-later
Copyright (c) 2024, Marsiske Stefan.
All rights reserved.
This file is part of liboprf.
liboprf is free software: you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public License
as published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
liboprf is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the implied
warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with liboprf. If not, see <https://www.gnu.org/licenses/>.
"""
import pyoprf, pysodium, ctypes
n = 5
t = 3
ts_epsilon = 5
# enable verbose logging for tp-dkg
libc = ctypes.cdll.LoadLibrary('libc.so.6')
cstderr = ctypes.c_void_p.in_dll(libc, 'stderr')
log_file = ctypes.c_void_p.in_dll(pyoprf.liboprf,'log_file')
log_file.value = cstderr.value
# create some long-term keypairs
peer_lt_pks = []
peer_lt_sks = []
for _ in range(n):
    pk, sk = pysodium.crypto_sign_keypair()
    peer_lt_pks.append(pk)
    peer_lt_sks.append(sk)
# initialize the TP and get the first message
tp, msg0 = pyoprf.tpdkg_start_tp(n, t, ts_epsilon, "pyoprf tpdkg test", peer_lt_pks)
print(f"n: {pyoprf.tpdkg_tpstate_n(tp)}, t: {pyoprf.tpdkg_tpstate_t(tp)}, sid: {bytes(c for c in pyoprf.tpdkg_tpstate_sessionid(tp)).hex()}")
# initialize all peers with the 1st message from TP
peers=[]
for i in range(n):
    peer = pyoprf.tpdkg_peer_start(ts_epsilon, peer_lt_sks[i], msg0)
    peers.append(peer)
for i in range(n):
    assert(pyoprf.tpdkg_peerstate_sessionid(peers[i]) == pyoprf.tpdkg_tpstate_sessionid(tp))
    assert(peer_lt_sks[i] == pyoprf.tpdkg_peerstate_lt_sk(peers[i]))
peer_msgs = []
while pyoprf.tpdkg_tp_not_done(tp):
    ret, sizes = pyoprf.tpdkg_tp_input_sizes(tp)
    # peer_msgs = (recv(size) for size in sizes)
    msgs = b''.join(peer_msgs)
    cur_step = pyoprf.tpdkg_tpstate_step(tp)
    try:
        tp_out = pyoprf.tpdkg_tp_next(tp, msgs)
        #print(f"tp: msg[{tp[0].step}]: {tp_out.raw.hex()}")
    except Exception as e:
        cheaters, cheats = pyoprf.tpdkg_get_cheaters(tp)
        print(f"Warning during the distributed key generation the peers misbehaved: {sorted(cheaters)}")
        for k, v in cheats:
            print(f"\tmisbehaving peer: {k} was caught: {v}")
        raise ValueError(f"{e} | tp step {cur_step}")
    peer_msgs = []
    while(len(b''.join(peer_msgs))==0 and pyoprf.tpdkg_peer_not_done(peers[0])):
        for i in range(n):
            if(len(tp_out)>0):
                msg = pyoprf.tpdkg_tp_peer_msg(tp, tp_out, i)
                #print(f"tp -> peer[{i+1}] {msg.hex()}")
            else:
                msg = ''
            out = pyoprf.tpdkg_peer_next(peers[i], msg)
            if(len(out)>0):
                peer_msgs.append(out)
                #print(f"peer[{i+1}] -> tp {peer_msgs[-1].hex()}")
        tp_out = ''
# we are done, let's check the shares
shares = [pyoprf.tpdkg_peerstate_share(peers[i]) for i in range(n)]
for i, share in enumerate(shares):
    print(f"share[{i+1}] {share.hex()}")
v0 = pyoprf.thresholdmult([bytes([i+1])+pysodium.crypto_scalarmult_ristretto255_base(shares[i][1:]) for i in (0,1,2)])
v1 = pyoprf.thresholdmult([bytes([i+1])+pysodium.crypto_scalarmult_ristretto255_base(shares[i][1:]) for i in (2,0,3)])
assert v0 == v1
v2 = pyoprf.thresholdmult([bytes([i+1])+pysodium.crypto_scalarmult_ristretto255_base(shares[i][1:]) for i in (2,1,4)])
assert v0 == v2
secret = pyoprf.dkg_reconstruct(shares[:t])
#print("secret", secret.hex())
assert v0 == pysodium.crypto_scalarmult_ristretto255_base(secret)
# clean up allocated buffers
for i in range(n):
    pyoprf.tpdkg_peer_free(peers[i])
liboprf-0.6.1/python/pyoprf/__init__.py
#!/usr/bin/env python
"""
Wrapper for liboprf library
SPDX-FileCopyrightText: 2023, Marsiske Stefan
SPDX-License-Identifier: LGPL-3.0-or-later
Copyright (c) 2023, Marsiske Stefan.
All rights reserved.
This file is part of liboprf.
liboprf is free software: you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public License
as published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
liboprf is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the implied
warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with liboprf. If not, see .
"""
import ctypes
import ctypes.util
import pysodium, os
import platform
from typing import List, Tuple
from itertools import zip_longest
if "BYZANTINE_DKG" in os.environ:
liboprf = ctypes.cdll.LoadLibrary(os.environ['BYZANTINE_DKG'])
else:
liboprf = ctypes.cdll.LoadLibrary(ctypes.util.find_library('oprf') or ctypes.util.find_library('liboprf'))
if not liboprf._name:
raise ValueError('Unable to find liboprf')
def split_by_n(iterable, n):
    return list(zip_longest(*[iter(iterable)]*n, fillvalue=''))
def __check(code):
    if code != 0:
        raise ValueError(f"error: {code}")
# (CFRG/IRTF) OPRF section
OPRF_BYTES=64
# This function generates an OPRF private key.
#
# This is almost the KeyGen OPRF function defined in the RFC: since
# this lib does not implement V oprf, we don't need a pubkey and so
# we don't bother with all that is related.
#
# @param [out] k - the per-user OPRF private key
# void oprf_KeyGen(uint8_t kU[crypto_core_ristretto255_SCALARBYTES]);
def keygen() -> bytes:
    k = ctypes.create_string_buffer(pysodium.crypto_core_ristretto255_SCALARBYTES)
    liboprf.oprf_KeyGen(k)
    return k.raw
# This function converts input x into an element of the OPRF group, randomizes it
# by some scalar r, producing blinded, and outputs (r, blinded).
#
# This is the Blind OPRF function defined in the RFC.
#
# @param [in] x - the input value to blind
# @param [out] r - an OPRF scalar value used for randomization
# @param [out] blinded - a serialized OPRF group element, a byte array of fixed length,
# the blinded version of x, an input to oprf_Evaluate
# @return The function raises a ValueError if there is something wrong with the inputs.
#
#int oprf_Blind(const uint8_t *x, const uint16_t x_len,
# uint8_t r[crypto_core_ristretto255_SCALARBYTES],
# uint8_t blinded[crypto_core_ristretto255_BYTES]);
def blind(x: bytes) -> (bytes, bytes):
    r = ctypes.create_string_buffer(pysodium.crypto_core_ristretto255_SCALARBYTES)
    blinded = ctypes.create_string_buffer(pysodium.crypto_core_ristretto255_BYTES)
    __check(liboprf.oprf_Blind(x, ctypes.c_size_t(len(x)), r, blinded))
    return r.raw, blinded.raw
# This function evaluates input element blinded using private key k, yielding output
# element Z.
#
# This is the Evaluate OPRF function defined in the RFC.
#
# @param [in] key - a private key - the output of keygen()
# @param [in] blinded - a serialized OPRF group element, a byte array
# of fixed length, an output of blind()
# @param [out] Z - a serialized OPRF group element, a byte array of fixed
# length, an input to oprf_Unblind
# @return The function raises a ValueError if there is something wrong with the inputs.
#int oprf_Evaluate(const uint8_t k[crypto_core_ristretto255_SCALARBYTES],
# const uint8_t blinded[crypto_core_ristretto255_BYTES],
# uint8_t Z[crypto_core_ristretto255_BYTES]);
def evaluate(key: bytes, blinded: bytes) -> bytes:
    if len(key) != pysodium.crypto_core_ristretto255_SCALARBYTES:
        raise ValueError("key has incorrect length")
    if not isinstance(key, bytes):
        raise ValueError("key is not of type bytes")
    if len(blinded) != pysodium.crypto_core_ristretto255_BYTES:
        raise ValueError("blinded param has incorrect length")
    if not isinstance(blinded, bytes):
        raise ValueError("blinded is not of type bytes")
    Z = ctypes.create_string_buffer(pysodium.crypto_core_ristretto255_BYTES)
    __check(liboprf.oprf_Evaluate(key, blinded, Z))
    return Z.raw
# This function removes random scalar r from Z, yielding output N.
#
# This is the Unblind OPRF function defined in the RFC.
#
# If you do not call finalize() on the result the output is equivalent
# to the OPRF protocol we refer to as HashDH - this protocol retains
# the algebraic structure of the value, and has weaker security
# guarantees, than the full 2HashDH which is equivalent to running
# finalize on the output of blind(). The hashDH variant is not
# explicitly specified by the CFRG/IRTF specification. This hashDH
# variant has one property that makes it interesting: it is an
# updateable OPRF - that is if the server updates their key, they can
# calculate a public delta value, that can be applied by the client to
# the output of blind() and the result will be as if the client and
# the server run the OPRF protocol with the original input and the new
# key. It is important to note that the delta value is not sensitive,
# and can be public.
#
# @param [in] r - an OPRF scalar value used for randomization in oprf_Blind
# @param [in] Z - a serialized OPRF group element, a byte array of fixed length,
# an output of oprf_Evaluate
# @param [out] N - a serialized OPRF group element with random scalar r removed,
# a byte array of fixed length, an input to oprf_Finalize
# @return The function raises a ValueError if there is something wrong with the inputs.
#int oprf_Unblind(const uint8_t r[crypto_core_ristretto255_SCALARBYTES],
# const uint8_t Z[crypto_core_ristretto255_BYTES],
# uint8_t N[crypto_core_ristretto255_BYTES]);
def unblind(r: bytes, Z: bytes) -> bytes:
if len(r) != pysodium.crypto_core_ristretto255_SCALARBYTES:
raise ValueError("param r has incorrect length")
if not isinstance(r, bytes):
raise ValueError("param r is not of type bytes")
if len(Z) != pysodium.crypto_core_ristretto255_BYTES:
raise ValueError("param Z has incorrect length")
if not isinstance(Z, bytes):
raise ValueError("param Z is not of type bytes")
N = ctypes.create_string_buffer(pysodium.crypto_core_ristretto255_BYTES)
__check(liboprf.oprf_Unblind(r, Z, N))
return N.raw
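# The following is an illustrative sketch (not part of the bindings API) of the
# key-update property of the HashDH variant described above: the server rotates
# its key and publishes delta, and the client applies delta to its old unblinded
# output instead of re-running the protocol. The function name and the input
# value b"example" are hypothetical.
def _example_hashdh_update():
    k = keygen()
    r, alpha = blind(b"example")
    N = unblind(r, evaluate(k, alpha))            # HashDH output (no finalize)
    # server-side key rotation; delta is not sensitive and can be public
    delta = pysodium.crypto_core_ristretto255_scalar_random()
    k_new = pysodium.crypto_core_ristretto255_scalar_mul(k, delta)
    # client-side update of the previously computed output
    N_new = pysodium.crypto_scalarmult_ristretto255(delta, N)
    return k_new, N_new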
# This function computes the OPRF output using input x, N, and domain
# separation tag info.
#
# This is the Finalize OPRF function defined in the RFC.
#
# @param [in] x - a value used to compute OPRF (the same value that
# was used as input to be blinded)
# @param [in] N - a serialized OPRF group element, a byte array of fixed length,
# an output of oprf_Unblind
# @param [out] y - an OPRF output
# @return The function raises a ValueError if there is something wrong with the inputs.
#int oprf_Finalize(const uint8_t *x, const uint16_t x_len,
# const uint8_t N[crypto_core_ristretto255_BYTES],
# uint8_t rwdU[OPRF_BYTES]);
def finalize(x: bytes, N: bytes) -> bytes:
if len(N) != pysodium.crypto_core_ristretto255_BYTES:
raise ValueError("param N has incorrect length")
if not isinstance(N, bytes):
raise ValueError("param N is not of type bytes")
y = ctypes.create_string_buffer(OPRF_BYTES)
__check(liboprf.oprf_Finalize(x, ctypes.c_size_t(len(x)), N, y))
return y.raw
# This function combines unblind() and finalize() as a convenience
def unblind_finalize(r: bytes, Z: bytes, x: bytes) -> bytes:
return finalize(x, unblind(r,Z))
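# The following is an illustrative sketch (not part of the bindings API): it
# shows the full 2HashDH flow end-to-end using the wrappers above. The function
# name and the input value b"example" are hypothetical.
def _example_oprf_flow():
    k = keygen()                                  # server-side private key
    r, alpha = blind(b"example")                  # client blinds its input
    beta = evaluate(k, alpha)                     # server evaluates the blinded element
    return unblind_finalize(r, beta, b"example")  # client unblinds and finalizes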
# TOPRF section
TOPRF_Share_BYTES=pysodium.crypto_core_ristretto255_SCALARBYTES+1
TOPRF_Part_BYTES=pysodium.crypto_core_ristretto255_BYTES+1
# This function calculates a lagrange coefficient based on the index
# and the indexes of the other contributing shareholders.
#
# @param [in] index - the index of the shareholder whose lagrange
# coefficient we're calculating, must be greater than 0
#
# @param [in] peers - the indexes of the shareholders that contribute to the reconstruction
#
# @param [out] result - the lagrange coefficient
#void coeff(const int index, const int peers_len, const uint8_t peers[peers_len], uint8_t result[crypto_scalarmult_ristretto255_SCALARBYTES]);
def coeff(index: int, peers: list) -> bytes:
if index < 1: raise ValueError("index must be positive integer")
    if len(peers) < 2: raise ValueError("peers must be a list of at least 2 integers")
peers_len=ctypes.c_size_t(len(peers))
c = ctypes.create_string_buffer(pysodium.crypto_core_ristretto255_SCALARBYTES)
    liboprf.coeff(index, peers_len, bytes(peers), c)
return c.raw
# This function creates shares of secret in a (threshold, n) scheme
# over the curve ristretto255
#
# @param [in] secret - the scalar value to be secretly shared
#
# @param [in] n - the number of shares created
#
# @param [in] threshold - the threshold needed to reconstruct the secret
#
# @param [out] shares - n shares
#
# @return The function raises a ValueError if there is something wrong with the inputs.
#void toprf_create_shares(const uint8_t secret[crypto_core_ristretto255_SCALARBYTES],
# const uint8_t n,
# const uint8_t threshold,
# uint8_t shares[n][TOPRF_Share_BYTES]);
bytes_list_t = List[bytes]
def create_shares(secret: bytes, n: int, t: int) -> bytes_list_t:
if len(secret) != pysodium.crypto_core_ristretto255_SCALARBYTES:
raise ValueError("secret has incorrect length")
if not isinstance(secret, bytes):
raise ValueError("secret is not of type bytes")
if n < t:
raise ValueError("t cannot be bigger than n")
if t < 2:
raise ValueError("t must be bigger than 1")
shares = ctypes.create_string_buffer(n*TOPRF_Share_BYTES)
__check(liboprf.toprf_create_shares(secret, n, t, shares))
return tuple([bytes(s) for s in split_by_n(shares.raw, TOPRF_Share_BYTES)])
# This function recovers the secret in the exponent using lagrange interpolation
# over the curve ristretto255
#
# The shareholders are not aware if they are contributing to a
# threshold or non-threshold oprf evaluation, from their perspective
# nothing changes in this approach.
#
# @param [in] responses - is an array of shares (k_i) multiplied by a
# point (P) on the r255 curve
#
# @param [in] responses_len - the number of elements in the response array
#
# @param [out] result - the reconstructed value of P multiplied by k
#
# @return The function raises a ValueError if there is something wrong with the inputs.
#int toprf_thresholdmult(const size_t response_len,
# const uint8_t responses[response_len][TOPRF_Part_BYTES],
# uint8_t result[crypto_scalarmult_ristretto255_BYTES]);
def thresholdmult(responses: bytes_list_t) -> bytes:
    if len(responses) < 2: raise ValueError("responses must be a list of at least 2 byte strings")
if not all(isinstance(r,bytes) for r in responses):
raise ValueError("at least one of the responses is not of type bytes")
if not all(len(r)==TOPRF_Part_BYTES for r in responses):
raise ValueError("at least one of the responses is not of correct size")
responses_len=ctypes.c_size_t(len(responses))
responses_buf = ctypes.create_string_buffer(b''.join(responses))
result = ctypes.create_string_buffer(pysodium.crypto_core_ristretto255_BYTES)
__check(liboprf.toprf_thresholdmult(responses_len, responses_buf, result))
return result.raw
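# The following is an illustrative sketch (not part of the bindings API): a
# (3,5) threshold OPRF with a centrally generated key, where the client
# interpolates the shareholders' responses via thresholdmult(). The function
# name and the input value are hypothetical.
def _example_threshold_oprf():
    k = keygen()
    shares = create_shares(k, 5, 3)
    r, alpha = blind(b"example")
    # any 3 shareholders respond; the share index byte is prepended so that
    # thresholdmult() can compute the lagrange coefficients
    betas = [s[:1] + evaluate(s[1:], alpha) for s in shares[:3]]
    return unblind(r, thresholdmult(betas))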
# This function is the efficient threshold version of oprf_Evaluate.
#
# This function needs to know in advance the indexes of all the
# shares that will be combined later in the toprf_thresholdcombine() function.
# By doing so, it reduces the total cost and distributes it among the shareholders.
#
# @param [in] k - a private key (for OPAQUE, this is kU, the user's
# OPRF private key)
#
# @param [in] blinded - a serialized OPRF group element, a byte array
# of fixed length, an output of oprf_Blind (for OPAQUE, this
# is the blinded pwdU, the user's password)
#
# @param [in] self - the index of the current shareholder
#
# @param [in] indexes - the indexes of all the shareholders
# contributing to this oprf evaluation,
#
# @param [in] index_len - the length of the indexes array,
#
# @param [out] Z - a serialized OPRF group element, a byte array of fixed length,
# an input to oprf_Unblind
#
# @return The function raises a ValueError if there is something wrong with the inputs.
#int toprf_Evaluate(const uint8_t k[TOPRF_Share_BYTES],
# const uint8_t blinded[crypto_core_ristretto255_BYTES],
# const uint8_t self, const uint8_t *indexes, const uint16_t index_len,
# uint8_t Z[TOPRF_Part_BYTES]);
def threshold_evaluate(k: bytes, blinded: bytes, self: int, indexes: list) -> bytes:
if len(k) != TOPRF_Share_BYTES:
raise ValueError("param k has incorrect length")
if not isinstance(k, bytes):
raise ValueError("param k is not of type bytes")
if len(blinded) != pysodium.crypto_core_ristretto255_BYTES:
raise ValueError("blinded param has incorrect length")
if not isinstance(blinded, bytes):
raise ValueError("blinded is not of type bytes")
if(self>255 or self<1):
raise ValueError("self outside valid range")
if(not all(i>0 and i<256 for i in indexes)):
raise ValueError("index(es) outside valid range")
index_len=ctypes.c_uint16(len(indexes))
indexes_buf=ctypes.create_string_buffer(bytes(indexes))
Z = ctypes.create_string_buffer(TOPRF_Part_BYTES)
__check(liboprf.toprf_Evaluate(k, blinded, self, indexes_buf, index_len, Z))
return Z.raw
# This function combines the results of the toprf_Evaluate()
# function to recover the shared secret in the exponent.
#
# @param [in] responses - is an array of shares (k_i) multiplied by a point (P) on the r255 curve
#
# @param [in] responses_len - the number of elements in the response array
#
# @param [out] result - the reconstructed value of P multiplied by k
#
# @return The function raises a ValueError if there is something wrong with the inputs.
#void toprf_thresholdcombine(const size_t response_len,
# const uint8_t _responses[response_len][TOPRF_Part_BYTES],
# uint8_t result[crypto_scalarmult_ristretto255_BYTES]);
def threshold_combine(responses: bytes_list_t) -> bytes:
    if len(responses) < 2: raise ValueError("responses must be a list of at least 2 byte strings")
if not all(isinstance(r,bytes) for r in responses):
raise ValueError("at least one of the responses is not of type bytes")
if not all(len(r)==TOPRF_Part_BYTES for r in responses):
raise ValueError("at least one of the responses is not of correct size")
responses_len=ctypes.c_size_t(len(responses))
responses_buf = ctypes.create_string_buffer(b''.join(responses))
result = ctypes.create_string_buffer(pysodium.crypto_core_ristretto255_BYTES)
__check(liboprf.toprf_thresholdcombine(responses_len, responses_buf, result))
return result.raw
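# The following is an illustrative sketch (not part of the bindings API): the
# same (3,5) setting as above, but the interpolation cost is moved to the
# shareholders by announcing the participating indexes up front and using
# threshold_evaluate()/threshold_combine(). Names and values are hypothetical.
def _example_threshold_combine():
    k = keygen()
    shares = create_shares(k, 5, 3)
    r, alpha = blind(b"example")
    indexes = (4, 2, 1)                           # the participating shareholders
    betas = [threshold_evaluate(shares[i-1], alpha, i, indexes) for i in indexes]
    return unblind(r, threshold_combine(betas))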
#int toprf_3hashtdh(const uint8_t k[TOPRF_Share_BYTES],
# const uint8_t z[TOPRF_Share_BYTES],
# const uint8_t alpha[crypto_core_ristretto255_BYTES],
# const uint8_t *ssid_S, const uint16_t ssid_S_len,
# uint8_t beta[TOPRF_Part_BYTES]);
def _3hashtdh(k: bytes, z: bytes, alpha: bytes, ssid_S: bytes) -> bytes:
if len(k) != TOPRF_Share_BYTES:
raise ValueError("param k has incorrect length")
if not isinstance(k, bytes):
raise ValueError("param k is not of type bytes")
if len(z) != TOPRF_Share_BYTES:
raise ValueError("param z has incorrect length")
if not isinstance(z, bytes):
raise ValueError("param z is not of type bytes")
if len(alpha) != pysodium.crypto_core_ristretto255_BYTES:
raise ValueError("alpha param has incorrect length")
if not isinstance(alpha, bytes):
raise ValueError("alpha is not of type bytes")
if not isinstance(ssid_S, bytes):
raise ValueError("ssid_S is not of type bytes")
if len(ssid_S) > (1<<16)-1:
raise ValueError("ssid_S is too long")
ssid_S_len=ctypes.c_uint16(len(ssid_S))
beta = ctypes.create_string_buffer(TOPRF_Part_BYTES)
__check(liboprf.toprf_3hashtdh(k, z, alpha, ssid_S, ssid_S_len, beta))
return beta.raw
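# The following is an illustrative sketch (not part of the bindings API): a
# (3,5) 3HashTDH evaluation where every shareholder holds a share of the key
# and a share of zero, as consumed by toprf_3hashtdh(). Names and values are
# hypothetical.
def _example_3hashtdh():
    k = keygen()
    shares = create_shares(k, 5, 3)
    zero_shares = create_shares(bytes(32), 5, 3)  # shares of the scalar 0
    r, alpha = blind(b"example")
    ssid_S = pysodium.randombytes(32)             # sub-session identifier
    betas = [_3hashtdh(ki, zi, alpha, ssid_S)
             for ki, zi in zip(shares[:3], zero_shares[:3])]
    return unblind(r, thresholdmult(betas))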
# todo documentation!
#int dkg_start(const uint8_t n,
# const uint8_t threshold,
# uint8_t commitment_hash[dkg_hash_BYTES],
# uint8_t commitments[dkg_commitment_BYTES(threshold)],
# TOPRF_Share shares[n]);
def dkg_start(n : int, t : int) -> (bytes, bytes_list_t):
if n < t:
raise ValueError("t cannot be bigger than n")
if t < 2:
raise ValueError("t must be bigger than 1")
shares = ctypes.create_string_buffer(n*TOPRF_Share_BYTES)
commitments = ctypes.create_string_buffer(t*pysodium.crypto_core_ristretto255_BYTES)
__check(liboprf.dkg_start(n, t, commitments, shares))
shares = tuple([bytes(s) for s in split_by_n(shares.raw, TOPRF_Share_BYTES)])
return commitments.raw, shares
#int dkg_verify_commitments(const uint8_t n,
# const uint8_t threshold,
# const uint8_t self,
# const uint8_t commitments[n][threshold*crypto_core_ristretto255_BYTES],
# const TOPRF_Share shares[n],
# uint8_t fails[n],
# uint8_t *fails_len);
def dkg_verify_commitments(n: int, t: int, self: int,
                           commitments : bytes,
shares: bytes_list_t) -> bytes:
if n < t:
raise ValueError("t cannot be bigger than n")
if t < 2:
raise ValueError("t must be bigger than 1")
if self < 1 or self > n:
raise ValueError("self must 1 <= self <= n")
if len(commitments) != n*t*pysodium.crypto_core_ristretto255_BYTES:
raise ValueError(f"signed_commitments must be {n*t*pysodium.crypto_core_ristretto255_BYTES} bytes is instead: {len(commitments)}")
shares = b''.join(shares)
if len(shares) != n*TOPRF_Share_BYTES:
raise ValueError(f"shares must be {TOPRF_Share_BYTES*n} bytes is instead {len(shares)}")
shares = ctypes.create_string_buffer(shares)
fails = ctypes.create_string_buffer(n)
fails_len = ctypes.c_uint8()
__check(liboprf.dkg_verify_commitments(n, t, self,
commitments, shares,
fails, ctypes.byref(fails_len)))
return fails[:fails_len.value]
#void dkg_finish(const uint8_t n,
# const TOPRF_Share shares[n],
# const uint8_t self,
# TOPRF_Share *xi);
def dkg_finish(n: int, shares: List[bytes], self: int) -> bytes:
if self < 1 or self > n:
raise ValueError("self must 1 <= self <= n")
shares = b''.join(shares)
if len(shares) != n*TOPRF_Share_BYTES:
raise ValueError(f"shares must be {TOPRF_Share_BYTES*n} bytes is instead {len(shares)}")
shares = ctypes.create_string_buffer(shares)
xi = ctypes.create_string_buffer(TOPRF_Share_BYTES)
xi[0]=self
liboprf.dkg_finish(n, shares, self, xi)
return xi.raw
#void dkg_reconstruct(const size_t response_len,
# const TOPRF_Share responses[response_len][2],
# uint8_t result[crypto_scalarmult_ristretto255_BYTES]);
def dkg_reconstruct(responses) -> bytes:
rlen = len(responses)
responses = ctypes.create_string_buffer(b''.join(responses))
result = ctypes.create_string_buffer(pysodium.crypto_core_ristretto255_BYTES)
liboprf.dkg_reconstruct(rlen, responses, result)
return result.raw
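# The following is an illustrative sketch (not part of the bindings API): a
# naked (3,5) DKG round executed in a single process, mirroring the message
# flow that the low-level wrappers above expect. Names are hypothetical; a real
# deployment must exchange the shares over authenticated, confidential channels.
def _example_dkg():
    n, t = 5, 3
    mailboxes = [[] for _ in range(n)]
    commitments = []
    for _ in range(n):                            # every peer deals shares to all peers
        coms, shares = dkg_start(n, t)
        commitments.append(coms)
        for i, s in enumerate(shares):
            mailboxes[i].append(s)
    commitments = b''.join(commitments)
    final_shares = []
    for i in range(n):                            # every peer verifies and finishes
        fails = dkg_verify_commitments(n, t, i+1, commitments, mailboxes[i])
        if len(fails) > 0:
            raise ValueError(f"peers {list(fails)} sent invalid contributions")
        final_shares.append(dkg_finish(n, mailboxes[i], i+1))
    return final_shares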
tpdkg_sessionid_SIZE=32
tpdkg_msg0_SIZE = 177 # ( sizeof(TP_DKG_Message) \
# + crypto_generichash_BYTES/*dst*/ \
# + 2 /*n,t*/ \
# + crypto_sign_PUBLICKEYBYTES /* tp_sign_pk */)
tpdkg_msg8_SIZE = 256 # (sizeof(TP_DKG_Message) /* header */ \
# + noise_xk_handshake3_SIZE /* 4th&final noise handshake */ \
# + sizeof(TOPRF_Share) /* msg: the noise_xk wrapped share */ \
# + crypto_secretbox_xchacha20poly1305_MACBYTES /* mac of msg */ \
# + crypto_auth_hmacsha256_BYTES /* key-committing mac over msg*/ )
tpdkg_max_err_SIZE = 128
class TP_DKG_Cheater(ctypes.Structure):
_fields_ = [('step', ctypes.c_int),
('error', ctypes.c_int),
('peer', ctypes.c_uint8),
('other_peer', ctypes.c_uint8),
('invalid_index', ctypes.c_int),
]
#int tpdkg_start_tp(TP_DKG_TPState *ctx, const uint64_t ts_epsilon,
# const uint8_t n, const uint8_t t,
# const char *proto_name, const size_t proto_name_len,
# const size_t msg0_len, TP_DKG_Message *msg0);
#
# also wraps conveniently:
#
# void tpdkg_tp_set_bufs(TP_DKG_TPState *ctx,
# uint8_t (*commitments)[][crypto_core_ristretto255_BYTES],
# uint16_t (*complaints)[],
# uint8_t (*suspicious)[],
# uint8_t (*tp_peers_sig_pks)[][crypto_sign_PUBLICKEYBYTES],
# uint8_t (*peer_lt_pks)[][crypto_sign_PUBLICKEYBYTES],
# uint64_t (*last_ts)[]);
def tpdkg_start_tp(n, t, ts_epsilon, proto_name, peer_lt_pks):
b = ctypes.create_string_buffer(liboprf.tpdkg_tpstate_size()+32)
b_addr = ctypes.addressof(b)
    s_addr = (b_addr + 31) & ~31  # round up to the next 32-byte boundary
state = ctypes.c_void_p(s_addr)
if state.value % 32 != 0:
raise ValueError("cannot align at 32bytes the TP_DKG_PeerState struct")
msg = ctypes.create_string_buffer(tpdkg_msg0_SIZE)
__check(liboprf.tpdkg_start_tp(state, ctypes.c_uint64(ts_epsilon), ctypes.c_uint8(n), ctypes.c_uint8(t), proto_name, ctypes.c_size_t(len(proto_name)), ctypes.c_size_t(len(msg.raw)), msg))
peers_sig_pks = ctypes.create_string_buffer(n*pysodium.crypto_sign_PUBLICKEYBYTES)
commitments = ctypes.create_string_buffer(n*t*pysodium.crypto_core_ristretto255_BYTES)
complaints = ctypes.create_string_buffer(n*n*2)
noisy_shares = ctypes.create_string_buffer(n*n*tpdkg_msg8_SIZE)
cheaters = (TP_DKG_Cheater * (t*t - 1))()
peer_lt_pks = b''.join(peer_lt_pks)
last_ts = (ctypes.c_uint64 * n)()
liboprf.tpdkg_tp_set_bufs(state,
ctypes.byref(commitments),
ctypes.byref(complaints),
ctypes.byref(noisy_shares),
ctypes.byref(cheaters),
len(cheaters),
ctypes.byref(peers_sig_pks),
peer_lt_pks,
ctypes.byref(last_ts))
# we need to keep these arrays around, otherwise the gc eats them up.
ctx = (state, cheaters, peers_sig_pks, commitments, complaints, noisy_shares, peer_lt_pks, last_ts, b)
return ctx, msg.raw
#size_t tpdkg_tp_input_size(const TP_DKG_TPState *ctx);
def tpdkg_tp_input_size(ctx):
return liboprf.tpdkg_tp_input_size(ctx[0])
#int tpdkg_tp_input_sizes(const TP_DKG_TPState *ctx, size_t *sizes);
def tpdkg_tp_input_sizes(ctx):
sizes = (ctypes.c_size_t * tpdkg_tpstate_n(ctx))()
ret = liboprf.tpdkg_tp_input_sizes(ctx[0], ctypes.byref(sizes))
return ret, [x for x in sizes]
#size_t tpdkg_tp_output_size(const TP_DKG_TPState *ctx);
def tpdkg_tp_output_size(ctx):
return liboprf.tpdkg_tp_output_size(ctx[0])
#int tpdkg_tp_next(TP_DKG_TPState *ctx, const uint8_t *input, const size_t input_len, uint8_t *output, const size_t output_len);
def tpdkg_tp_next(ctx, msg):
input_len = tpdkg_tp_input_size(ctx)
if len(msg) != input_len: raise ValueError(f"input msg is invalid size: {len(msg)}B must be: {input_len}B")
output_len = tpdkg_tp_output_size(ctx)
output = ctypes.create_string_buffer(output_len)
__check(liboprf.tpdkg_tp_next(ctx[0], msg, ctypes.c_size_t(input_len), output, ctypes.c_size_t(output_len)))
return output
#int tpdkg_tp_peer_msg(const TP_DKG_TPState *ctx, const uint8_t *base, const size_t base_size, const uint8_t peer, const uint8_t **msg, size_t *len);
def tpdkg_tp_peer_msg(ctx, base, peer):
msg = ctypes.POINTER(ctypes.c_char)()
size = ctypes.c_size_t()
__check(liboprf.tpdkg_tp_peer_msg(ctx[0], base, len(base.raw), peer, ctypes.byref(msg), ctypes.byref(size)))
msg = b''.join([msg[i] for i in range(size.value)])
return msg
#int tpdkg_tp_not_done(const TP_DKG_TPState *tp);
def tpdkg_tp_not_done(ctx):
return liboprf.tpdkg_tp_not_done(ctx[0]) == 1
def tpdkg_get_cheaters(ctx):
cheats = []
cheaters = set()
for i in range(tpdkg_tpstate_cheater_len(ctx)):
err = ctypes.create_string_buffer(tpdkg_max_err_SIZE)
p = liboprf.tpdkg_cheater_msg(ctypes.byref(ctx[1][i]), err, tpdkg_max_err_SIZE)
        if p <= 0 or p > tpdkg_tpstate_n(ctx):
print(f"invalid cheater index: {p}, skipping this entry")
continue
cheaters.add(p)
cheats.append((p, err.raw[:err.raw.find(b'\x00')].decode('utf8')))
return cheaters, cheats
liboprf.tpdkg_peerstate_n.restype = ctypes.c_uint8
def tpdkg_peerstate_n(ctx):
return liboprf.tpdkg_peerstate_n(ctx[0])
liboprf.tpdkg_peerstate_t.restype = ctypes.c_uint8
def tpdkg_peerstate_t(ctx):
return liboprf.tpdkg_peerstate_t(ctx[0])
liboprf.tpdkg_peerstate_sessionid.restype = ctypes.POINTER(ctypes.c_uint8)
def tpdkg_peerstate_sessionid(ctx):
ptr = liboprf.tpdkg_peerstate_sessionid(ctx[0])
return bytes(ptr[i] for i in range(tpdkg_sessionid_SIZE))
liboprf.tpdkg_peerstate_lt_sk.restype = ctypes.POINTER(ctypes.c_uint8)
def tpdkg_peerstate_lt_sk(ctx):
ptr = liboprf.tpdkg_peerstate_lt_sk(ctx[0])
return bytes(ptr[i] for i in range(pysodium.crypto_sign_SECRETKEYBYTES))
liboprf.tpdkg_peerstate_share.restype = ctypes.POINTER(ctypes.c_uint8)
def tpdkg_peerstate_share(ctx):
ptr = liboprf.tpdkg_peerstate_share(ctx[0])
return bytes(ptr[i] for i in range(TOPRF_Share_BYTES))
def tpdkg_peerstate_step(ctx):
return liboprf.tpdkg_peerstate_step(ctx[0])
liboprf.tpdkg_tpstate_n.restype = ctypes.c_uint8
def tpdkg_tpstate_n(ctx):
return liboprf.tpdkg_tpstate_n(ctx[0])
liboprf.tpdkg_tpstate_t.restype = ctypes.c_uint8
def tpdkg_tpstate_t(ctx):
return liboprf.tpdkg_tpstate_t(ctx[0])
liboprf.tpdkg_tpstate_cheater_len.restype = ctypes.c_size_t
def tpdkg_tpstate_cheater_len(ctx):
return liboprf.tpdkg_tpstate_cheater_len(ctx[0])
liboprf.tpdkg_tpstate_sessionid.restype = ctypes.POINTER(ctypes.c_uint8)
def tpdkg_tpstate_sessionid(ctx):
ptr = liboprf.tpdkg_tpstate_sessionid(ctx[0])
return bytes(ptr[i] for i in range(tpdkg_sessionid_SIZE))
def tpdkg_tpstate_step(ctx):
return liboprf.tpdkg_tpstate_step(ctx[0])
#int tpdkg_start_peer(TP_DKG_PeerState *ctx, const uint64_t ts_epsilon,
# const uint8_t peer_lt_sk[crypto_sign_SECRETKEYBYTES],
# const TP_DKG_Message *msg0);
#
# also wraps conveniently
#
#void tpdkg_peer_set_bufs(TP_DKG_PeerState *ctx,
# uint8_t (*peers_sig_pks)[][crypto_sign_PUBLICKEYBYTES],
# uint8_t (*peers_noise_pks)[][crypto_scalarmult_BYTES],
# Noise_XK_session_t *(*noise_outs)[],
# Noise_XK_session_t *(*noise_ins)[],
# TOPRF_Share (*shares)[],
# TOPRF_Share (*xshares)[],
# uint8_t (*commitments)[][crypto_core_ristretto255_BYTES],
# uint16_t (*complaints)[],
# uint8_t (*my_complaints)[]);
def tpdkg_peer_start(ts_epsilon, peer_lt_sk, msg0):
b = ctypes.create_string_buffer(liboprf.tpdkg_peerstate_size()+32)
b_addr = ctypes.addressof(b)
    s_addr = (b_addr + 31) & ~31  # round up to the next 32-byte boundary
state = ctypes.c_void_p(s_addr)
if state.value % 32 != 0:
raise ValueError("cannot align at 32bytes the TP_DKG_PeerState struct")
__check(liboprf.tpdkg_start_peer(state, ctypes.c_uint64(ts_epsilon), peer_lt_sk, msg0))
n = tpdkg_peerstate_n([state])
t = tpdkg_peerstate_t([state])
peers_sig_pks = ctypes.create_string_buffer(b"peer_sig_pks", n * pysodium.crypto_sign_PUBLICKEYBYTES)
peers_noise_pks = ctypes.create_string_buffer(b"peer_noise_pks", n * pysodium.crypto_scalarmult_BYTES)
noise_outs = (ctypes.c_void_p * n)()
noise_ins = (ctypes.c_void_p * n)()
shares = ctypes.create_string_buffer(n * TOPRF_Share_BYTES)
xshares = ctypes.create_string_buffer(n * TOPRF_Share_BYTES)
commitments = ctypes.create_string_buffer(n * t * pysodium.crypto_core_ristretto255_BYTES)
complaints = ctypes.create_string_buffer(n * n * 2)
my_complaints = ctypes.create_string_buffer(n)
last_ts = (ctypes.c_uint64 * n)()
liboprf.tpdkg_peer_set_bufs(state,
ctypes.byref(peers_sig_pks),
ctypes.byref(peers_noise_pks),
noise_outs,
noise_ins,
ctypes.byref(shares),
ctypes.byref(xshares),
ctypes.byref(commitments),
ctypes.byref(complaints),
ctypes.byref(my_complaints),
ctypes.byref(last_ts))
# we need to keep these arrays around, otherwise the gc eats them up.
ctx = (state, peers_sig_pks, peers_noise_pks, noise_outs, noise_ins, shares, xshares, commitments, complaints, my_complaints, b, last_ts)
return ctx
#size_t tpdkg_peer_input_size(const TP_DKG_PeerState *ctx);
def tpdkg_peer_input_size(ctx):
return liboprf.tpdkg_peer_input_size(ctx[0])
#size_t tpdkg_peer_output_size(const TP_DKG_PeerState *ctx);
def tpdkg_peer_output_size(ctx):
return liboprf.tpdkg_peer_output_size(ctx[0])
#int tpdkg_peer_next(TP_DKG_PeerState *ctx, const uint8_t *input, const size_t input_len, uint8_t *output, const size_t output_len);
def tpdkg_peer_next(ctx, msg):
input_len = tpdkg_peer_input_size(ctx)
if len(msg) != input_len: raise ValueError(f"input msg is invalid size: {len(msg)}B must be: {input_len}B")
output_len = tpdkg_peer_output_size(ctx)
output = ctypes.create_string_buffer(output_len)
__check(liboprf.tpdkg_peer_next(ctx[0], msg, ctypes.c_size_t(input_len), output, ctypes.c_size_t(output_len)))
return output.raw
#int tpdkg_peer_not_done(const TP_DKG_PeerState *peer);
def tpdkg_peer_not_done(ctx):
return liboprf.tpdkg_peer_not_done(ctx[0]) == 1
#void tpdkg_peer_free(TP_DKG_PeerState *ctx);
def tpdkg_peer_free(ctx):
liboprf.tpdkg_peer_free(ctx[0])
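# The following is an illustrative sketch (not part of the bindings API): it
# drives a complete TP-DKG run in a single process, with the trusted party and
# all peers in the same loop. In a real deployment the messages produced here
# are exchanged over a network. All names and values are hypothetical.
def _example_tp_dkg(n=5, t=3, ts_epsilon=5):
    keypairs = [pysodium.crypto_sign_keypair() for _ in range(n)]
    tp, msg0 = tpdkg_start_tp(n, t, ts_epsilon, b"tp-dkg example", [pk for pk, _ in keypairs])
    peers = [tpdkg_peer_start(ts_epsilon, sk, msg0) for _, sk in keypairs]
    peer_msgs = []
    while tpdkg_tp_not_done(tp):
        tp_out = tpdkg_tp_next(tp, b''.join(peer_msgs))   # TP processes the peers' messages
        peer_msgs = []
        while len(b''.join(peer_msgs)) == 0 and tpdkg_peer_not_done(peers[0]):
            for i in range(n):
                # demultiplex the TP output into the per-peer message, if any
                msg = tpdkg_tp_peer_msg(tp, tp_out, i) if len(tp_out) > 0 else b''
                out = tpdkg_peer_next(peers[i], msg)
                if len(out) > 0:
                    peer_msgs.append(out)
            tp_out = b''
    shares = [tpdkg_peerstate_share(p) for p in peers]    # one final share per peer
    for p in peers:
        tpdkg_peer_free(p)
    return shares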
liboprf-0.6.1/python/pyoprf/multiplexer.py 0000775 0000000 0000000 00000013331 14741217270 0020750 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
import ssl, socket, select
from binascii import a2b_base64
class Peer:
def __init__(self, name, addr, type = "SSL", ssl_cert=None, timeout=5):
self.name = name
self.type = type # currently only TCP or SSL over TCP, but
# could be others like dedicated NOISE_XK,
# or hybrid mceliece+x25519 over USB or
# even UART
self.address = addr # Currently only TCP host:port as a tuple
self.ssl_cert = ssl_cert
self.timeout = timeout
self.state = "new"
self.fd = None
def connect(self):
if self.state == "connected":
raise ValueError(f"{self.name} is already connected")
if self.type not in {"SSL", "TCP"}:
raise ValueError(f"Unsupported peer type: {self.type}")
if self.type == "SSL":
ctx = ssl.create_default_context()
ctx.minimum_version = ssl.TLSVersion.TLSv1_2
if(self.ssl_cert):
ctx.load_verify_locations(self.ssl_cert) # only for dev, production system should use proper certs!
ctx.check_hostname=False # only for dev, production system should use proper certs!
ctx.verify_mode=ssl.CERT_NONE # only for dev, production system should use proper certs!
else:
ctx.load_default_certs()
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.check_hostname = True
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(self.timeout)
        if self.type == "SSL":
            self.fd = ctx.wrap_socket(s, server_hostname=self.address[0])
        else:
            self.fd = s
        self.fd.connect(self.address)
self.state="connected"
def read(self,size):
if self.state != "connected":
raise ValueError(f"{self.name} cannot read, is not connected")
res = []
read = 0
while read= 0}
if not fds: raise ValueError("not enough peers left to get enough results")
#print("select")
r, _,_ =select.select(fds.keys(),[],[],2)
#print("select done")
if not r: continue
#print("got r")
for fd in r:
idx = fds[fd][0]
if idx in responses:
continue
#print(f"gathering {idx}")
pkt = fds[fd][1].read(expectedmsglen)
if pkt == b'\x00\x04fail':
responses[idx]=None
continue
if debug: print(f"{idx} got response of {len(pkt)}")
tmp = pkt if not proc else proc(pkt)
if tmp is None: continue
responses[idx]=tmp
if set((tuple(e) if isinstance(e,list) else e) for e in responses.values())=={None}:
raise ValueError("oracles failed")
if None in responses.values():
            if debug: print("some responses failed")
#return {k:v for k,v in responses.items() if v is not None}
return [responses.get(i,None) for i in range(len(self.peers))]
#return responses
def close(self):
for p in self.peers:
p.close()
liboprf-0.6.1/python/pyoprf/noisexk.py 0000775 0000000 0000000 00000023473 14741217270 0020066 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
"""
Wrapper for hacl-star XK_Noise
SPDX-FileCopyrightText: 2024, Marsiske Stefan
SPDX-License-Identifier: LGPL-3.0-or-later
Copyright (c) 2024, Marsiske Stefan.
All rights reserved.
This file is part of liboprf.
liboprf is free software: you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public License
as published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
liboprf is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the implied
warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with liboprf. If not, see <https://www.gnu.org/licenses/>.
"""
import ctypes
import ctypes.util
from ctypes import c_void_p, c_ubyte, c_uint32, c_char, c_size_t, POINTER, byref
lib = ctypes.cdll.LoadLibrary(ctypes.util.find_library('oprf-noiseXK')
or ctypes.util.find_library('liboprf-noiseXK'))
if not lib._name:
raise ValueError('Unable to find liboprf-noiseXK')
libc = ctypes.cdll.LoadLibrary(ctypes.util.find_library('c') or ctypes.util.find_library('libc'))
if not libc._name:
raise ValueError('Unable to find libc')
KEYSIZE = 32
NOISE_XK_CONF_ZERO = 0
NOISE_XK_AUTH_KNOWN_SENDER_NO_KCI = 2
NOISE_XK_CONF_STRONG_FORWARD_SECRECY = 5
def __check(code):
if code != 0:
raise ValueError
lib.Noise_XK_device_add_peer.restype = c_void_p
lib.Noise_XK_device_add_peer.argtypes = [c_void_p, c_void_p, ctypes.c_char_p]
def add_peer(device, name, key):
return lib.Noise_XK_device_add_peer(device, name, key)
def pubkey(privkey):
pubkey = ctypes.create_string_buffer(KEYSIZE)
lib.Noise_XK_dh_secret_to_public(pubkey, privkey)
return pubkey.raw
lib.Noise_XK_device_create.restype = c_void_p
def create_device(prologue, name, privkey):
srlz_key = b'\x00'*KEYSIZE
return lib.Noise_XK_device_create(len(prologue), prologue, name, srlz_key, privkey)
lib.Noise_XK_peer_get_id.restype = c_void_p
lib.Noise_XK_peer_get_id.argtypes = [c_void_p]
def get_peerid(peer):
return lib.Noise_XK_peer_get_id(peer)
lib.Noise_XK_session_create_initiator.restype = c_void_p
lib.Noise_XK_session_create_initiator.argtypes = [c_void_p, c_void_p]
def create_session_initiator(device, peerid):
    res = lib.Noise_XK_session_create_initiator(device, peerid)
    if not res: raise ValueError
    return res
lib.Noise_XK_session_create_responder.restype = c_void_p
lib.Noise_XK_session_create_responder.argtypes = [c_void_p]
def create_session_responder(device):
res = lib.Noise_XK_session_create_responder(device)
    if not res: raise ValueError
return res
lib.Noise_XK_pack_message_with_conf_level.restype = c_void_p
lib.Noise_XK_session_write.argtypes = [c_void_p, c_void_p, POINTER(c_uint32), POINTER(POINTER(c_ubyte))]
lib.Noise_XK_encap_message_p_free.argtypes = [c_void_p]
def initiator_1st_msg(session):
encap_msg = lib.Noise_XK_pack_message_with_conf_level(0, 0, 0);
msg_len = c_uint32()
msg = POINTER(c_ubyte)()
if 0!=lib.Noise_XK_session_write(encap_msg, session, byref(msg_len), byref(msg)):
raise ValueError
lib.Noise_XK_encap_message_p_free(encap_msg)
res = bytes(msg[i] for i in range(msg_len.value))
if msg_len.value > 0:
libc.free(msg)
return res
# Noise_XK_session_read(&encap_msg, bob_session, cipher_msg_len, cipher_msg);
lib.Noise_XK_session_read.argtypes = [POINTER(c_void_p), c_void_p, c_uint32, POINTER(c_ubyte)]
# Noise_XK_unpack_message_with_auth_level(&plain_msg_len, &plain_msg, NOISE_XK_AUTH_ZERO, encap_msg),
def responder_1st_msg(session, msg):
encap_msg = c_void_p()
msg = (c_ubyte * len(msg)).from_buffer(bytearray(msg))
msg_len = c_uint32(len(msg))
if 0 != lib.Noise_XK_session_read(byref(encap_msg), session, msg_len, msg):
raise ValueError
plain_msg_len = c_uint32()
plain_msg = POINTER(c_ubyte)()
if not lib.Noise_XK_unpack_message_with_auth_level(byref(plain_msg_len), byref(plain_msg), 0, encap_msg):
raise ValueError
lib.Noise_XK_encap_message_p_free(encap_msg)
if plain_msg_len.value > 0:
libc.free(plain_msg)
return initiator_1st_msg(session)
def initiator_handshake_finish(session, msg):
encap_msg = c_void_p()
msg = (c_ubyte * len(msg)).from_buffer(bytearray(msg))
msg_len = c_uint32(len(msg))
if 0 != lib.Noise_XK_session_read(byref(encap_msg), session, msg_len, msg):
raise ValueError
plain_msg_len = c_uint32()
plain_msg = POINTER(c_ubyte)()
if not lib.Noise_XK_unpack_message_with_auth_level(byref(plain_msg_len), byref(plain_msg), 0, encap_msg):
raise ValueError
lib.Noise_XK_encap_message_p_free(encap_msg)
if plain_msg_len.value > 0:
libc.free(plain_msg)
def send_msg(session, msg):
if isinstance(msg, str): msg = msg.encode('utf8')
encap_msg = lib.Noise_XK_pack_message_with_conf_level(NOISE_XK_CONF_STRONG_FORWARD_SECRECY, len(msg), msg);
ct_len = c_uint32()
ct = POINTER(c_ubyte)()
if 0!=lib.Noise_XK_session_write(encap_msg, session, byref(ct_len), byref(ct)):
raise ValueError
lib.Noise_XK_encap_message_p_free(encap_msg)
res = bytes(ct[:ct_len.value])
if ct_len.value > 0:
libc.free(ct)
return res
def read_msg(session, msg):
encap_msg = c_void_p()
u_bytes = (c_ubyte * (len(msg)))()
u_bytes[:] = msg
if 0 != lib.Noise_XK_session_read(byref(encap_msg), session, len(msg), u_bytes):
raise ValueError
plain_msg_len = c_uint32()
plain_msg = POINTER(c_ubyte)()
if not lib.Noise_XK_unpack_message_with_auth_level(byref(plain_msg_len), byref(plain_msg),
NOISE_XK_AUTH_KNOWN_SENDER_NO_KCI, encap_msg):
raise ValueError
lib.Noise_XK_encap_message_p_free(encap_msg)
res = bytes(plain_msg[i] for i in range(plain_msg_len.value))
if plain_msg_len.value > 0:
libc.free(plain_msg)
return res
lib.Noise_XK_session_get_peer_id.restype = c_uint32
lib.Noise_XK_session_get_peer_id.argtypes = [c_void_p]
lib.Noise_XK_device_lookup_peer_by_id.restype = c_void_p
lib.Noise_XK_device_lookup_peer_by_id.argtypes = [c_void_p, c_uint32]
lib.Noise_XK_peer_get_static.argtypes = [(c_char * 32), c_void_p]
def get_pubkey(session, device):
peerid = lib.Noise_XK_session_get_peer_id(session)
peer = lib.Noise_XK_device_lookup_peer_by_id(device, peerid);
pubkey = ctypes.create_string_buffer(KEYSIZE)
lib.Noise_XK_peer_get_static(pubkey, peer);
return pubkey.raw
def initiator_session(initiator_privkey, responder_pubkey, iname=None,
rname=None, dst=None):
if dst is None:
dst = b"liboprf-noiseXK"
if iname is None:
iname = b"initiator"
if rname is None:
rname = b"responder"
initiator_pubkey = pubkey(initiator_privkey)
dev = create_device(dst, iname, initiator_privkey)
peer = add_peer(dev, rname, responder_pubkey)
peerid = get_peerid(peer)
session = create_session_initiator(dev, peerid)
msg = initiator_1st_msg(session)
return session, msg
libc.malloc.restype = POINTER(c_ubyte)
def responder_session(responder_privkey, auth_keys, msg, dst=None, name=None):
if dst is None:
dst = b"liboprf-noiseXK"
if name is None:
name = b"responder"
responder_pubkey = pubkey(responder_privkey)
dev = create_device(dst, name, responder_privkey)
for key, peer in auth_keys:
add_peer(dev,peer,key)
session = create_session_responder(dev)
msg = responder_1st_msg(session, msg)
return session, msg
def initiator_session_complete(session, msg):
return initiator_handshake_finish(session, msg)
def test():
from binascii import unhexlify, hexlify
# low level
alice_privkey = unhexlify("c3da55379de9c6908e94ea4df28d084f32eccf03491c71f754b4075577a28552")
alice_pubkey = pubkey(alice_privkey)
bob_privkey = unhexlify("c3da55379de9c6908e94ea4df28d084f32eccf03491c71f754b4075577a28552")
bob_pubkey = pubkey(bob_privkey)
adev = create_device("liboprf-noiseXK test", "Alice", alice_privkey)
bpeer = add_peer(adev, "Bob", bob_pubkey)
bobid = get_peerid(bpeer)
bdev = create_device("liboprf-noiseXK test", "Bob", bob_privkey)
add_peer(bdev, "Alice", alice_pubkey)
asession = create_session_initiator(adev, bobid)
bsession = create_session_responder(bdev)
msg = initiator_1st_msg(asession)
msg = responder_1st_msg(bsession, msg)
initiator_handshake_finish(asession, msg)
ct = send_msg(asession, "hello bob!")
pt = read_msg(bsession, ct)
peer_pk = get_pubkey(bsession, bdev)
print(hexlify(peer_pk))
print(pt)
ct = send_msg(bsession, "hello alice!")
pt = read_msg(asession, ct)
print(pt)
# high-level
a2session, msg = initiator_session(alice_privkey, bob_pubkey)
b2session, msg = responder_session(bob_privkey, [(alice_pubkey, "Alice")], msg)
initiator_session_complete(a2session, msg)
ct = send_msg(a2session, "hello bob!")
pt = read_msg(b2session, ct)
print(pt)
ct = send_msg(b2session, "hello alice!")
pt = read_msg(a2session, ct)
print(pt)
for _ in range(1000):
if ct[0] % 2 == 0:
sender = a2session
receiver = b2session
else:
sender = b2session
receiver = a2session
message = ct[:16+(ct[1]>>4)] * (ct[1] & 0xf)
ct = send_msg(sender, message)
pt = read_msg(receiver, ct)
assert(pt == message)
if __name__ == '__main__':
test()
liboprf-0.6.1/python/setup.py 0000775 0000000 0000000 00000002346 14741217270 0016223 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
# SPDX-FileCopyrightText: 2023, Marsiske Stefan
# SPDX-License-Identifier: LGPL-3.0-or-later
import os
from setuptools import setup, find_packages
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(name = 'pyoprf',
version = '0.6.1',
description = 'python bindings for liboprf',
license = "LGPLv3",
author = 'Stefan Marsiske',
author_email = 'toprf@ctrlc.hu',
url = 'https://github.com/stef/liboprf/python',
long_description=read('README.md'),
long_description_content_type="text/markdown",
packages=find_packages(),
install_requires = ("pysodium", "SecureString"),
classifiers = ["Development Status :: 4 - Beta",
"License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)",
"Topic :: Security :: Cryptography",
"Topic :: Security",
],
#ext_modules = [liboprf],
)
liboprf-0.6.1/python/tests/ 0000775 0000000 0000000 00000000000 14741217270 0015643 5 ustar 00root root 0000000 0000000 liboprf-0.6.1/python/tests/test.py 0000775 0000000 0000000 00000024374 14741217270 0017211 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
import unittest
import pyoprf, pysodium, ctypes
from binascii import unhexlify
from itertools import combinations
class TestEndToEnd(unittest.TestCase):
def test_cfrg_irtf(self):
"""CFRG/IRTF spec compliant run"""
# Alice blinds the input "test"
r, alpha = pyoprf.blind(b"test")
# Bob generates a "secret" key
k = pyoprf.keygen()
        # Bob evaluates Alice's blinded value with its key
beta = pyoprf.evaluate(k, alpha)
# Alice unblinds Bobs evaluation
N = pyoprf.unblind(r, beta)
# Alice finalizes the calculation
y = pyoprf.finalize(b"test", N)
# rerun and assert that oprf(k,"test") equals all runs
r, alpha = pyoprf.blind(b"test")
beta = pyoprf.evaluate(k, alpha)
N = pyoprf.unblind(r, beta)
y2 = pyoprf.finalize(b"test", N)
self.assertEqual(y, y2)
def test_cfrg_irtf_testvec1(self):
"""IRTF/CFRG testvector 1"""
x = unhexlify("00")
k = unhexlify("5ebcea5ee37023ccb9fc2d2019f9d7737be85591ae8652ffa9ef0f4d37063b0e")
out=unhexlify("527759c3d9366f277d8c6020418d96bb393ba2afb20ff90df23fb7708264e2f3ab9135e3bd69955851de4b1f9fe8a0973396719b7912ba9ee8aa7d0b5e24bcf6")
r, alpha = pyoprf.blind(x)
beta = pyoprf.evaluate(k, alpha)
N = pyoprf.unblind(r, beta)
y = pyoprf.finalize(x, N)
self.assertEqual(y,out)
def test_cfrg_irtf_testvec2(self):
"""IRTF/CFRG testvector 2"""
x=unhexlify("5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a")
k = unhexlify("5ebcea5ee37023ccb9fc2d2019f9d7737be85591ae8652ffa9ef0f4d37063b0e")
out=unhexlify("f4a74c9c592497375e796aa837e907b1a045d34306a749db9f34221f7e750cb4f2a6413a6bf6fa5e19ba6348eb673934a722a7ede2e7621306d18951e7cf2c73")
r, alpha = pyoprf.blind(x)
beta = pyoprf.evaluate(k, alpha)
N = pyoprf.unblind(r, beta)
y = pyoprf.finalize(x, N)
self.assertEqual(y, out)
def test_hashDH_update(self):
"""HashDH with update example"""
# Alice blinds the input "test"
r, alpha = pyoprf.blind(b"test")
# Bob generates a "secret" key
k = pyoprf.keygen()
        # Bob evaluates Alice's blinded value with its key
beta = pyoprf.evaluate(k, alpha)
# Alice unblinds Bobs evaluation
N = pyoprf.unblind(r, beta)
# Bob updates his key, by generating delta
delta = pysodium.crypto_core_ristretto255_scalar_random()
k2 = pysodium.crypto_core_ristretto255_scalar_mul(k, delta)
# Alice updates her previous calculation of N with delta
N2 = pysodium.crypto_scalarmult_ristretto255(delta, N)
# rerun hashDH to verify if N2 is equal with a full run
r, alpha = pyoprf.blind(b"test")
beta = pyoprf.evaluate(k2, alpha)
N2_ = pyoprf.unblind(r, beta)
self.assertEqual(N2, N2_)
def test_toprf_sss(self):
"""tOPRF (hashDH), (3,5), with centrally shared key interpolation at client"""
k2 = pyoprf.keygen()
shares = pyoprf.create_shares(k2, 5, 3)
r, alpha = pyoprf.blind(b"test")
#print(' '.join(s.hex() for s in shares))
        # each shareholder evaluates the blinded value with its share (index byte prepended)
betas = tuple(s[:1]+pyoprf.evaluate(s[1:], alpha) for s in shares)
#print(''.join(b.hex() for b in betas))
beta = pyoprf.thresholdmult(betas)
Nt = pyoprf.unblind(r, beta)
beta = pyoprf.evaluate(k2, alpha)
N2 = pyoprf.unblind(r, beta)
self.assertEqual(N2, Nt)
def test_toprf_tcombine(self):
"""tOPRF (hashDH), (3,5), with centrally shared key interpolation at servers"""
k2 = pyoprf.keygen()
shares = pyoprf.create_shares(k2, 5, 3)
r, alpha = pyoprf.blind(b"test")
indexes=(4,2,1)
betas = tuple(pyoprf.threshold_evaluate(shares[i-1], alpha, i, indexes) for i in indexes)
        beta = pyoprf.threshold_combine(betas)
        Nt = pyoprf.unblind(r, beta)
        # compare against a direct evaluation with the unshared key
        beta = pyoprf.evaluate(k2, alpha)
        Nt2 = pyoprf.unblind(r, beta)
        self.assertEqual(Nt, Nt2)
def test_raw_dkg(self):
"""naked Distributed KeyGen (3,5)"""
n = 5
t = 3
mailboxes=[[] for _ in range(n)]
commitments=[]
for _ in range(n):
coms, shares = pyoprf.dkg_start(n,t)
commitments.append(coms)
for i,s in enumerate(shares):
mailboxes[i].append(s)
commitments=b''.join(commitments)
shares = []
for i in range(n):
fails = pyoprf.dkg_verify_commitments(n,t,i+1,
commitments,
mailboxes[i])
if len(fails) > 0:
for fail in fails:
print(f"fail: peer {fail}")
raise ValueError("failed to verify contributions, aborting")
xi = pyoprf.dkg_finish(n, mailboxes[i], i+1)
#print(i, xi.hex(), x_i.hex())
shares.append(xi)
# test if the final shares all reproduce the same shared `secret`
v0 = pyoprf.thresholdmult([bytes([i+1])+pysodium.crypto_scalarmult_ristretto255_base(shares[i][1:]) for i in (0,1,2)])
for peers in combinations(range(1,5), 3):
v1 = pyoprf.thresholdmult([bytes([i+1])+pysodium.crypto_scalarmult_ristretto255_base(shares[i][1:]) for i in peers])
self.assertEqual(v0, v1)
secret = pyoprf.dkg_reconstruct(shares[:t])
#print("secret", secret.hex())
self.assertEqual(v0, pysodium.crypto_scalarmult_ristretto255_base(secret))
def test_explicit_3hashtdh(self):
"""toprf based on 2024/1455 [JSPPJ24] https://eprint.iacr.org/2024/1455
using explicit implementation of 3hashtdh"""
print("tOPRF (3hashTDH), (3,5), with centrally shared key interpolation at client")
k2 = pyoprf.keygen()
shares = pyoprf.create_shares(k2, 5, 3)
zero_shares = pyoprf.create_shares(bytes([0]*32), 5, 3)
r, alpha = pyoprf.blind(b"test")
ssid_S = pysodium.randombytes(32)
betas = []
for k, z in zip(shares,zero_shares):
h2 = pyoprf.evaluate(
z[1:],
pysodium.crypto_core_ristretto255_from_hash(pysodium.crypto_generichash(ssid_S + alpha, outlen=64)),
)
beta = pyoprf.evaluate(k[1:], alpha)
betas.append(k[:1]+pysodium.crypto_core_ristretto255_add(beta, h2))
# normal 2hashdh(k2,"test")
beta = pyoprf.evaluate(k2, alpha)
Nt0 = pyoprf.unblind(r, beta)
for peers in combinations(betas, 3):
            beta = pyoprf.thresholdmult(peers)
Nt1 = pyoprf.unblind(r, beta)
self.assertEqual(Nt0, Nt1)
def test_native_3hashtdh(self):
"""toprf based on 2024/1455 [JSPPJ24] https://eprint.iacr.org/2024/1455
using libopr native implementation of 3hashtdh
tOPRF (3hashTDH), (3,5), with centrally shared key interpolation at client"""
k2 = pyoprf.keygen()
shares = pyoprf.create_shares(k2, 5, 3)
zero_shares = pyoprf.create_shares(bytes([0]*32), 5, 3)
r, alpha = pyoprf.blind(b"test")
ssid_S = pysodium.randombytes(32)
betas = []
for k, z in zip(shares,zero_shares):
betas.append(pyoprf._3hashtdh(k, z, alpha, ssid_S))
beta = pyoprf.evaluate(k2, alpha)
Nt0 = pyoprf.unblind(r, beta)
for peers in combinations(betas, 3):
            beta = pyoprf.thresholdmult(peers)
Nt1 = pyoprf.unblind(r, beta)
self.assertEqual(Nt0, Nt1)
def test_tp_dkg(self):
"""Trusted Party Distributed KeyGeneration"""
n = 5
t = 3
ts_epsilon = 5
# enable verbose logging for tp-dkg
#libc = ctypes.cdll.LoadLibrary('libc.so.6')
#cstderr = ctypes.c_void_p.in_dll(libc, 'stderr')
#log_file = ctypes.c_void_p.in_dll(pyoprf.liboprf,'log_file')
#log_file.value = cstderr.value
# create some long-term keypairs
peer_lt_pks = []
peer_lt_sks = []
for _ in range(n):
pk, sk = pysodium.crypto_sign_keypair()
peer_lt_pks.append(pk)
peer_lt_sks.append(sk)
# initialize the TP and get the first message
tp, msg0 = pyoprf.tpdkg_start_tp(n, t, ts_epsilon, "pyoprf tpdkg test", peer_lt_pks)
print(f"n: {pyoprf.tpdkg_tpstate_n(tp)}, t: {pyoprf.tpdkg_tpstate_t(tp)}, sid: {bytes(c for c in pyoprf.tpdkg_tpstate_sessionid(tp)).hex()}")
# initialize all peers with the 1st message from TP
peers=[]
for i in range(n):
peer = pyoprf.tpdkg_peer_start(ts_epsilon, peer_lt_sks[i], msg0)
peers.append(peer)
for i in range(n):
self.assertEqual(pyoprf.tpdkg_peerstate_sessionid(peers[i]), pyoprf.tpdkg_tpstate_sessionid(tp))
self.assertEqual(peer_lt_sks[i], pyoprf.tpdkg_peerstate_lt_sk(peers[i]))
peer_msgs = []
while pyoprf.tpdkg_tp_not_done(tp):
ret, sizes = pyoprf.tpdkg_tp_input_sizes(tp)
# peer_msgs = (recv(size) for size in sizes)
msgs = b''.join(peer_msgs)
cur_step = pyoprf.tpdkg_tpstate_step(tp)
try:
tp_out = pyoprf.tpdkg_tp_next(tp, msgs)
#print(f"tp: msg[{tp[0].step}]: {tp_out.raw.hex()}")
except Exception as e:
cheaters, cheats = pyoprf.tpdkg_get_cheaters(tp)
print(f"Warning during the distributed key generation the peers misbehaved: {sorted(cheaters)}")
for k, v in cheats:
print(f"\tmisbehaving peer: {k} was caught: {v}")
raise ValueError(f"{e} | tp step {cur_step}")
peer_msgs = []
while(len(b''.join(peer_msgs))==0 and pyoprf.tpdkg_peer_not_done(peers[0])):
for i in range(n):
if(len(tp_out)>0):
msg = pyoprf.tpdkg_tp_peer_msg(tp, tp_out, i)
#print(f"tp -> peer[{i+1}] {msg.hex()}")
else:
msg = ''
out = pyoprf.tpdkg_peer_next(peers[i], msg)
if(len(out)>0):
peer_msgs.append(out)
#print(f"peer[{i+1}] -> tp {peer_msgs[-1].hex()}")
tp_out = ''
# we are done, let's check the shares
shares = [pyoprf.tpdkg_peerstate_share(peers[i]) for i in range(n)]
for i, share in enumerate(shares):
print(f"share[{i+1}] {share.hex()}")
v0 = pyoprf.thresholdmult([bytes([i+1])+pysodium.crypto_scalarmult_ristretto255_base(shares[i][1:]) for i in (0,1,2)])
for peers_idxs in combinations(range(1,5), 3):
v1 = pyoprf.thresholdmult([bytes([i+1])+pysodium.crypto_scalarmult_ristretto255_base(shares[i][1:]) for i in peers_idxs])
self.assertEqual(v0, v1)
secret = pyoprf.dkg_reconstruct(shares[:t])
#print("secret", secret.hex())
self.assertEqual(v0, pysodium.crypto_scalarmult_ristretto255_base(secret))
# clean up allocated buffers
for i in range(n):
pyoprf.tpdkg_peer_free(peers[i])
liboprf-0.6.1/src/ 0000775 0000000 0000000 00000000000 14741217270 0013747 5 ustar 00root root 0000000 0000000 liboprf-0.6.1/src/dkg.c 0000664 0000000 0000000 00000014734 14741217270 0014671 0 ustar 00root root 0000000 0000000 #include
#include
#include
#include "toprf.h"
#include "utils.h"
#include "dkg.h"
/*
@copyright 2023-24, Stefan Marsiske toprf@ctrlc.hu
This file is part of liboprf.
liboprf is free software: you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public License
as published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
liboprf is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the License
along with liboprf. If not, see .
*/
/*
warning this is a low-level interface. Do not use directly unless
you use it to implement DKG protocols which have proper sessionids
and other protections against replay and confused deputy attacks.
for an example of a high-level DKG protocol see tp-dkg.[ch]
*/
// calculates polynomial f(j) given the polynomials threshold coefficients in
// array a
static void polynom(const uint8_t j, const uint8_t threshold,
const uint8_t a[threshold][crypto_core_ristretto255_SCALARBYTES],
TOPRF_Share *result) {
//f(z) = a_0 + a_1*z + a_2*z^2 + a_3*z^3 + ⋯ + (a_t)*(z^t)
result->index=j;
// f(z) = result = a[0] +.....
memcpy(result->value, a[0], crypto_core_ristretto255_SCALARBYTES);
// z = j
uint8_t z[crypto_core_ristretto255_SCALARBYTES]={j};
// z^t ->
  for(int t=1;t<threshold;t++) {
    // tmp = a[t] * z^t
    uint8_t tmp[crypto_core_ristretto255_SCALARBYTES];
    memcpy(tmp, a[t], crypto_core_ristretto255_SCALARBYTES);
    for(int i=0;i<t;i++) crypto_core_ristretto255_scalar_mul(tmp, tmp, z);
    // result += a[t] * z^t
    crypto_core_ristretto255_scalar_add(result->value, result->value, tmp);
  }
}
int dkg_start(const uint8_t n,
const uint8_t threshold,
uint8_t commitments[threshold][crypto_core_ristretto255_BYTES],
TOPRF_Share shares[n]) {
uint8_t a[threshold][crypto_core_ristretto255_SCALARBYTES];
if(0!=sodium_mlock(a,sizeof a)) {
return -1;
}
for(int k=0;kvalue, 0, crypto_core_ristretto255_SCALARBYTES);
for(int i=0;ivalue, xi->value, shares[i].value);
//dump((uint8_t*)&shares[i][0], sizeof(TOPRF_Share), "s[%d,%d] ", qual[i], self);
}
//dump(xi->value, crypto_core_ristretto255_SCALARBYTES, "x[%d] ", self);
}
void dkg_reconstruct(const size_t response_len,
const TOPRF_Share responses[response_len],
uint8_t result[crypto_scalarmult_ristretto255_BYTES]) {
uint8_t lpoly[crypto_scalarmult_ristretto255_SCALARBYTES];
uint8_t tmp[crypto_scalarmult_ristretto255_SCALARBYTES];
memset(result,0,crypto_scalarmult_ristretto255_BYTES);
uint8_t indexes[response_len];
for(size_t i=0;i