");
}
goto done;
err:
dump_desc(s_dup, "router descriptor");
routerinfo_free(router);
router = NULL;
done:
tor_cert_free(ntor_cc_cert);
if (tokens) {
SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t));
smartlist_free(tokens);
}
smartlist_free(exit_policy_tokens);
if (area) {
DUMP_AREA(area, "routerinfo");
memarea_drop_all(area);
}
if (can_dl_again_out)
*can_dl_again_out = can_dl_again;
return router;
}
/** Parse a single extrainfo entry from the string s, ending at
* end. (If end is NULL, parse up to the end of s.) If
* cache_copy is true, make a copy of the extra-info document in the
* cache_info fields of the result. If routermap is provided, use it
* as a map from router identity to routerinfo_t when looking up signing keys.
*
* If can_dl_again_out is provided, set *can_dl_again_out to 1
* if it's okay to try to download an extrainfo with this same digest again,
* and 0 if it isn't. (It might not be okay to download it again if part of
 * the portion covered by the digest is invalid.)
*/
extrainfo_t *
extrainfo_parse_entry_from_string(const char *s, const char *end,
int cache_copy, struct digest_ri_map_t *routermap,
int *can_dl_again_out)
{
extrainfo_t *extrainfo = NULL;
char digest[128];
smartlist_t *tokens = NULL;
directory_token_t *tok;
crypto_pk_t *key = NULL;
routerinfo_t *router = NULL;
memarea_t *area = NULL;
const char *s_dup = s;
/* Do not set this to '1' until we have parsed everything that we intend to
* parse that's covered by the hash. */
int can_dl_again = 0;
if (BUG(s == NULL))
return NULL;
if (!end) {
end = s + strlen(s);
}
  /* Strip any trailing blank lines, so that 'end' points immediately after
   * the final newline of the document. */
while (end > s+2 && *(end-1) == '\n' && *(end-2) == '\n')
--end;
if (router_get_extrainfo_hash(s, end-s, digest) < 0) {
log_warn(LD_DIR, "Couldn't compute router hash.");
goto err;
}
tokens = smartlist_new();
area = memarea_new();
if (tokenize_string(area,s,end,tokens,extrainfo_token_table,0)) {
log_warn(LD_DIR, "Error tokenizing extra-info document.");
goto err;
}
if (smartlist_len(tokens) < 2) {
log_warn(LD_DIR, "Impossibly short extra-info document.");
goto err;
}
/* XXXX Accept this in position 1 too, and ed identity in position 0. */
tok = smartlist_get(tokens,0);
if (tok->tp != K_EXTRA_INFO) {
log_warn(LD_DIR,"Entry does not start with \"extra-info\"");
goto err;
}
extrainfo = tor_malloc_zero(sizeof(extrainfo_t));
extrainfo->cache_info.is_extrainfo = 1;
if (cache_copy)
extrainfo->cache_info.signed_descriptor_body = tor_memdup_nulterm(s,end-s);
extrainfo->cache_info.signed_descriptor_len = end-s;
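  /* Remember both the digest computed above (which the "router-signature"
   * check below covers) and a SHA256 digest of the same signed body. */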
memcpy(extrainfo->cache_info.signed_descriptor_digest, digest, DIGEST_LEN);
crypto_digest256((char*)extrainfo->digest256, s, end-s, DIGEST_SHA256);
tor_assert(tok->n_args >= 2);
if (!is_legal_nickname(tok->args[0])) {
log_warn(LD_DIR,"Bad nickname %s on \"extra-info\"",escaped(tok->args[0]));
goto err;
}
strlcpy(extrainfo->nickname, tok->args[0], sizeof(extrainfo->nickname));
if (strlen(tok->args[1]) != HEX_DIGEST_LEN ||
base16_decode(extrainfo->cache_info.identity_digest, DIGEST_LEN,
tok->args[1], HEX_DIGEST_LEN) != DIGEST_LEN) {
log_warn(LD_DIR,"Invalid fingerprint %s on \"extra-info\"",
escaped(tok->args[1]));
goto err;
}
tok = find_by_keyword(tokens, K_PUBLISHED);
if (parse_iso_time(tok->args[0], &extrainfo->cache_info.published_on)) {
log_warn(LD_DIR,"Invalid published time %s on \"extra-info\"",
escaped(tok->args[0]));
goto err;
}
{
directory_token_t *ed_sig_tok, *ed_cert_tok;
ed_sig_tok = find_opt_by_keyword(tokens, K_ROUTER_SIG_ED25519);
ed_cert_tok = find_opt_by_keyword(tokens, K_IDENTITY_ED25519);
int n_ed_toks = !!ed_sig_tok + !!ed_cert_tok;
if (n_ed_toks != 0 && n_ed_toks != 2) {
log_warn(LD_DIR, "Router descriptor with only partial ed25519/"
"cross-certification support");
goto err;
}
if (ed_sig_tok) {
tor_assert(ed_cert_tok);
const int ed_cert_token_pos = smartlist_pos(tokens, ed_cert_tok);
if (ed_cert_token_pos != 1) {
/* Accept this in position 0 XXXX */
log_warn(LD_DIR, "Ed25519 certificate in wrong position");
goto err;
}
if (ed_sig_tok != smartlist_get(tokens, smartlist_len(tokens)-2)) {
log_warn(LD_DIR, "Ed25519 signature in wrong position");
goto err;
}
if (strcmp(ed_cert_tok->object_type, "ED25519 CERT")) {
log_warn(LD_DIR, "Wrong object type on identity-ed25519 in decriptor");
goto err;
}
uint8_t d256[DIGEST256_LEN];
const char *signed_start, *signed_end;
tor_cert_t *cert = tor_cert_parse(
(const uint8_t*)ed_cert_tok->object_body,
ed_cert_tok->object_size);
if (! cert) {
log_warn(LD_DIR, "Couldn't parse ed25519 cert");
goto err;
}
      /* Store the cert in the extrainfo so that it gets freed along with
       * the extrainfo. */
extrainfo->cache_info.signing_key_cert = cert;
if (cert->cert_type != CERT_TYPE_ID_SIGNING ||
! cert->signing_key_included) {
log_warn(LD_DIR, "Invalid form for ed25519 cert");
goto err;
}
if (router_get_hash_impl_helper(s, end-s, "extra-info ",
"\nrouter-sig-ed25519",
' ', LOG_WARN,
&signed_start, &signed_end) < 0) {
log_warn(LD_DIR, "Can't find ed25519-signed portion of extrainfo");
goto err;
}
crypto_digest_t *d = crypto_digest256_new(DIGEST_SHA256);
crypto_digest_add_bytes(d, ED_DESC_SIGNATURE_PREFIX,
strlen(ED_DESC_SIGNATURE_PREFIX));
crypto_digest_add_bytes(d, signed_start, signed_end-signed_start);
crypto_digest_get_digest(d, (char*)d256, sizeof(d256));
crypto_digest_free(d);
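      /* Batch-verify two ed25519 signatures: check[0] covers the
       * identity-ed25519 certificate itself, and check[1] covers the
       * router-sig-ed25519 signature over the prefixed digest we just
       * computed, made with the key certified by that certificate. */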
ed25519_checkable_t check[2];
int check_ok[2];
if (tor_cert_get_checkable_sig(&check[0], cert, NULL, NULL) < 0) {
log_err(LD_BUG, "Couldn't create 'checkable' for cert.");
goto err;
}
if (ed25519_signature_from_base64(&check[1].signature,
ed_sig_tok->args[0])<0) {
log_warn(LD_DIR, "Couldn't decode ed25519 signature");
goto err;
}
check[1].pubkey = &cert->signed_key;
check[1].msg = d256;
check[1].len = DIGEST256_LEN;
if (ed25519_checksig_batch(check_ok, check, 2) < 0) {
log_warn(LD_DIR, "Incorrect ed25519 signature(s)");
goto err;
}
/* We don't check the certificate expiration time: checking that it
* matches the cert in the router descriptor is adequate. */
}
}
/* We've checked everything that's covered by the hash. */
can_dl_again = 1;
if (routermap &&
(router = digestmap_get((digestmap_t*)routermap,
extrainfo->cache_info.identity_digest))) {
key = router->identity_pkey;
}
tok = find_by_keyword(tokens, K_ROUTER_SIGNATURE);
if (strcmp(tok->object_type, "SIGNATURE") ||
tok->object_size < 128 || tok->object_size > 512) {
log_warn(LD_DIR, "Bad object type or length on extra-info signature");
goto err;
}
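  /* If we know the router's identity key (via routermap), check the
   * signature now; otherwise stash it in pending_sig so it can be checked
   * later, once the corresponding router is known. */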
if (key) {
if (check_signature_token(digest, DIGEST_LEN, tok, key, 0,
"extra-info") < 0)
goto err;
if (router)
extrainfo->cache_info.send_unencrypted =
router->cache_info.send_unencrypted;
} else {
extrainfo->pending_sig = tor_memdup(tok->object_body,
tok->object_size);
extrainfo->pending_sig_len = tok->object_size;
}
goto done;
err:
dump_desc(s_dup, "extra-info descriptor");
extrainfo_free(extrainfo);
extrainfo = NULL;
done:
if (tokens) {
SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t));
smartlist_free(tokens);
}
if (area) {
DUMP_AREA(area, "extrainfo");
memarea_drop_all(area);
}
if (can_dl_again_out)
*can_dl_again_out = can_dl_again;
return extrainfo;
}
/** Parse a key certificate from s; point end-of-string to
* the first character after the certificate. */
authority_cert_t *
authority_cert_parse_from_string(const char *s, const char **end_of_string)
{
/** Reject any certificate at least this big; it is probably an overflow, an
* attack, a bug, or some other nonsense. */
#define MAX_CERT_SIZE (128*1024)
authority_cert_t *cert = NULL, *old_cert;
smartlist_t *tokens = NULL;
char digest[DIGEST_LEN];
directory_token_t *tok;
char fp_declared[DIGEST_LEN];
char *eos;
size_t len;
int found;
memarea_t *area = NULL;
const char *s_dup = s;
s = eat_whitespace(s);
eos = strstr(s, "\ndir-key-certification");
if (! eos) {
log_warn(LD_DIR, "No signature found on key certificate");
return NULL;
}
eos = strstr(eos, "\n-----END SIGNATURE-----\n");
if (! eos) {
log_warn(LD_DIR, "No end-of-signature found on key certificate");
return NULL;
}
eos = strchr(eos+2, '\n');
tor_assert(eos);
++eos;
len = eos - s;
if (len > MAX_CERT_SIZE) {
log_warn(LD_DIR, "Certificate is far too big (at %lu bytes long); "
"rejecting", (unsigned long)len);
return NULL;
}
tokens = smartlist_new();
area = memarea_new();
if (tokenize_string(area,s, eos, tokens, dir_key_certificate_table, 0) < 0) {
log_warn(LD_DIR, "Error tokenizing key certificate");
goto err;
}
if (router_get_hash_impl(s, strlen(s), digest, "dir-key-certificate-version",
"\ndir-key-certification", '\n', DIGEST_SHA1) < 0)
goto err;
tok = smartlist_get(tokens, 0);
if (tok->tp != K_DIR_KEY_CERTIFICATE_VERSION || strcmp(tok->args[0], "3")) {
log_warn(LD_DIR,
"Key certificate does not begin with a recognized version (3).");
goto err;
}
cert = tor_malloc_zero(sizeof(authority_cert_t));
memcpy(cert->cache_info.signed_descriptor_digest, digest, DIGEST_LEN);
tok = find_by_keyword(tokens, K_DIR_SIGNING_KEY);
tor_assert(tok->key);
cert->signing_key = tok->key;
tok->key = NULL;
if (crypto_pk_get_digest(cert->signing_key, cert->signing_key_digest))
goto err;
tok = find_by_keyword(tokens, K_DIR_IDENTITY_KEY);
tor_assert(tok->key);
cert->identity_key = tok->key;
tok->key = NULL;
tok = find_by_keyword(tokens, K_FINGERPRINT);
tor_assert(tok->n_args);
if (base16_decode(fp_declared, DIGEST_LEN, tok->args[0],
strlen(tok->args[0])) != DIGEST_LEN) {
log_warn(LD_DIR, "Couldn't decode key certificate fingerprint %s",
escaped(tok->args[0]));
goto err;
}
if (crypto_pk_get_digest(cert->identity_key,
cert->cache_info.identity_digest))
goto err;
if (tor_memneq(cert->cache_info.identity_digest, fp_declared, DIGEST_LEN)) {
log_warn(LD_DIR, "Digest of certificate key didn't match declared "
"fingerprint");
goto err;
}
tok = find_opt_by_keyword(tokens, K_DIR_ADDRESS);
if (tok) {
struct in_addr in;
char *address = NULL;
tor_assert(tok->n_args);
/* XXX++ use some tor_addr parse function below instead. -RD */
if (tor_addr_port_split(LOG_WARN, tok->args[0], &address,
&cert->dir_port) < 0 ||
tor_inet_aton(address, &in) == 0) {
log_warn(LD_DIR, "Couldn't parse dir-address in certificate");
tor_free(address);
goto err;
}
cert->addr = ntohl(in.s_addr);
tor_free(address);
}
tok = find_by_keyword(tokens, K_DIR_KEY_PUBLISHED);
if (parse_iso_time(tok->args[0], &cert->cache_info.published_on) < 0) {
goto err;
}
tok = find_by_keyword(tokens, K_DIR_KEY_EXPIRES);
if (parse_iso_time(tok->args[0], &cert->expires) < 0) {
goto err;
}
tok = smartlist_get(tokens, smartlist_len(tokens)-1);
if (tok->tp != K_DIR_KEY_CERTIFICATION) {
log_warn(LD_DIR, "Certificate didn't end with dir-key-certification.");
goto err;
}
/* If we already have this cert, don't bother checking the signature. */
old_cert = authority_cert_get_by_digests(
cert->cache_info.identity_digest,
cert->signing_key_digest);
found = 0;
if (old_cert) {
/* XXXX We could just compare signed_descriptor_digest, but that wouldn't
* buy us much. */
if (old_cert->cache_info.signed_descriptor_len == len &&
old_cert->cache_info.signed_descriptor_body &&
tor_memeq(s, old_cert->cache_info.signed_descriptor_body, len)) {
log_debug(LD_DIR, "We already checked the signature on this "
"certificate; no need to do so again.");
found = 1;
}
}
if (!found) {
if (check_signature_token(digest, DIGEST_LEN, tok, cert->identity_key, 0,
"key certificate")) {
goto err;
}
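    /* The dir-key-crosscert object must be a signature of this certificate's
     * identity digest, made with the signing key: it binds the signing key
     * to the identity key. */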
tok = find_by_keyword(tokens, K_DIR_KEY_CROSSCERT);
if (check_signature_token(cert->cache_info.identity_digest,
DIGEST_LEN,
tok,
cert->signing_key,
CST_NO_CHECK_OBJTYPE,
"key cross-certification")) {
goto err;
}
}
cert->cache_info.signed_descriptor_len = len;
cert->cache_info.signed_descriptor_body = tor_malloc(len+1);
memcpy(cert->cache_info.signed_descriptor_body, s, len);
cert->cache_info.signed_descriptor_body[len] = 0;
cert->cache_info.saved_location = SAVED_NOWHERE;
if (end_of_string) {
*end_of_string = eat_whitespace(eos);
}
SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t));
smartlist_free(tokens);
if (area) {
DUMP_AREA(area, "authority cert");
memarea_drop_all(area);
}
return cert;
err:
dump_desc(s_dup, "authority cert");
authority_cert_free(cert);
SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t));
smartlist_free(tokens);
if (area) {
DUMP_AREA(area, "authority cert");
memarea_drop_all(area);
}
return NULL;
}
/** Helper: given a string s, return the start of the next router-status
 * object (starting with "r " at the start of a line), unless a directory
 * footer or directory signature begins before it, in which case return that
 * instead. If none of these is found, return the end of the string. */
static inline const char *
find_start_of_next_routerstatus(const char *s)
{
const char *eos, *footer, *sig;
if ((eos = strstr(s, "\nr ")))
++eos;
else
eos = s + strlen(s);
footer = tor_memstr(s, eos-s, "\ndirectory-footer");
sig = tor_memstr(s, eos-s, "\ndirectory-signature");
if (footer && sig)
return MIN(footer, sig) + 1;
else if (footer)
return footer+1;
else if (sig)
return sig+1;
else
return eos;
}
/** Parse the GuardFraction string from a consensus or vote.
*
* If vote or vote_rs are set the document getting
* parsed is a vote routerstatus. Otherwise it's a consensus. This is
* the same semantic as in routerstatus_parse_entry_from_string(). */
STATIC int
routerstatus_parse_guardfraction(const char *guardfraction_str,
networkstatus_t *vote,
vote_routerstatus_t *vote_rs,
routerstatus_t *rs)
{
int ok;
const char *end_of_header = NULL;
int is_consensus = !vote_rs;
uint32_t guardfraction;
tor_assert(bool_eq(vote, vote_rs));
  /* If this info comes from a consensus, but we shouldn't apply
guardfraction, just exit. */
if (is_consensus && !should_apply_guardfraction(NULL)) {
return 0;
}
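  /* The argument has the form "GuardFraction=NUM", where NUM is an integer
   * between 0 and 100; skip to just past the '=' to parse the value. */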
end_of_header = strchr(guardfraction_str, '=');
if (!end_of_header) {
return -1;
}
guardfraction = (uint32_t)tor_parse_ulong(end_of_header+1,
10, 0, 100, &ok, NULL);
if (!ok) {
log_warn(LD_DIR, "Invalid GuardFraction %s", escaped(guardfraction_str));
return -1;
}
log_debug(LD_GENERAL, "[*] Parsed %s guardfraction '%s' for '%s'.",
is_consensus ? "consensus" : "vote",
guardfraction_str, rs->nickname);
if (!is_consensus) { /* We are parsing a vote */
vote_rs->status.guardfraction_percentage = guardfraction;
vote_rs->status.has_guardfraction = 1;
} else {
/* We are parsing a consensus. Only apply guardfraction to guards. */
if (rs->is_possible_guard) {
rs->guardfraction_percentage = guardfraction;
rs->has_guardfraction = 1;
} else {
log_warn(LD_BUG, "Got GuardFraction for non-guard %s. "
"This is not supposed to happen. Not applying. ", rs->nickname);
}
}
return 0;
}
/** Summarize the protocols listed in protocols into out,
* falling back or correcting them based on version as appropriate.
*/
STATIC void
summarize_protover_flags(protover_summary_flags_t *out,
const char *protocols,
const char *version)
{
tor_assert(out);
memset(out, 0, sizeof(*out));
if (protocols) {
out->protocols_known = 1;
out->supports_extend2_cells =
protocol_list_supports_protocol(protocols, PRT_RELAY, 2);
out->supports_ed25519_link_handshake_compat =
protocol_list_supports_protocol(protocols, PRT_LINKAUTH, 3);
out->supports_ed25519_link_handshake_any =
protocol_list_supports_protocol_or_later(protocols, PRT_LINKAUTH, 3);
out->supports_ed25519_hs_intro =
protocol_list_supports_protocol(protocols, PRT_HSINTRO, 4);
out->supports_v3_hsdir =
protocol_list_supports_protocol(protocols, PRT_HSDIR,
PROTOVER_HSDIR_V3);
out->supports_v3_rendezvous_point =
protocol_list_supports_protocol(protocols, PRT_HSREND,
PROTOVER_HS_RENDEZVOUS_POINT_V3);
}
if (version && !strcmpstart(version, "Tor ")) {
if (!out->protocols_known) {
/* The version is a "Tor" version, and where there is no
* list of protocol versions that we should be looking at instead. */
out->supports_extend2_cells =
tor_version_as_new_as(version, "0.2.4.8-alpha");
out->protocols_known = 1;
} else {
/* Bug #22447 forces us to filter on this version. */
if (!tor_version_as_new_as(version, "0.3.0.8")) {
out->supports_v3_hsdir = 0;
}
}
}
}
/** Given a string at *s, containing a routerstatus object, and an
* empty smartlist at tokens, parse and return the first router status
* object in the string, and advance *s to just after the end of the
* router status. Return NULL and advance *s on error.
*
* If vote and vote_rs are provided, don't allocate a fresh
* routerstatus but use vote_rs instead.
*
* If consensus_method is nonzero, this routerstatus is part of a
* consensus, and we should parse it according to the method used to
* make that consensus.
*
* Parse according to the syntax used by the consensus flavor flav.
**/
STATIC routerstatus_t *
routerstatus_parse_entry_from_string(memarea_t *area,
const char **s, smartlist_t *tokens,
networkstatus_t *vote,
vote_routerstatus_t *vote_rs,
int consensus_method,
consensus_flavor_t flav)
{
const char *eos, *s_dup = *s;
routerstatus_t *rs = NULL;
directory_token_t *tok;
char timebuf[ISO_TIME_LEN+1];
struct in_addr in;
int offset = 0;
tor_assert(tokens);
tor_assert(bool_eq(vote, vote_rs));
if (!consensus_method)
flav = FLAV_NS;
tor_assert(flav == FLAV_NS || flav == FLAV_MICRODESC);
eos = find_start_of_next_routerstatus(*s);
if (tokenize_string(area,*s, eos, tokens, rtrstatus_token_table,0)) {
log_warn(LD_DIR, "Error tokenizing router status");
goto err;
}
if (smartlist_len(tokens) < 1) {
log_warn(LD_DIR, "Impossibly short router status");
goto err;
}
tok = find_by_keyword(tokens, K_R);
tor_assert(tok->n_args >= 7); /* guaranteed by GE(7) in K_R setup */
if (flav == FLAV_NS) {
if (tok->n_args < 8) {
log_warn(LD_DIR, "Too few arguments to r");
goto err;
}
} else if (flav == FLAV_MICRODESC) {
offset = -1; /* There is no descriptor digest in an md consensus r line */
}
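  /* From here on, argument indexes into the "r" line are shifted by
   * 'offset': microdesc-flavored consensuses omit the descriptor digest, so
   * every later field appears one position earlier. */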
if (vote_rs) {
rs = &vote_rs->status;
} else {
rs = tor_malloc_zero(sizeof(routerstatus_t));
}
if (!is_legal_nickname(tok->args[0])) {
log_warn(LD_DIR,
"Invalid nickname %s in router status; skipping.",
escaped(tok->args[0]));
goto err;
}
strlcpy(rs->nickname, tok->args[0], sizeof(rs->nickname));
if (digest_from_base64(rs->identity_digest, tok->args[1])) {
log_warn(LD_DIR, "Error decoding identity digest %s",
escaped(tok->args[1]));
goto err;
}
if (flav == FLAV_NS) {
if (digest_from_base64(rs->descriptor_digest, tok->args[2])) {
log_warn(LD_DIR, "Error decoding descriptor digest %s",
escaped(tok->args[2]));
goto err;
}
}
if (tor_snprintf(timebuf, sizeof(timebuf), "%s %s",
tok->args[3+offset], tok->args[4+offset]) < 0 ||
parse_iso_time(timebuf, &rs->published_on)<0) {
log_warn(LD_DIR, "Error parsing time '%s %s' [%d %d]",
tok->args[3+offset], tok->args[4+offset],
offset, (int)flav);
goto err;
}
if (tor_inet_aton(tok->args[5+offset], &in) == 0) {
log_warn(LD_DIR, "Error parsing router address in network-status %s",
escaped(tok->args[5+offset]));
goto err;
}
rs->addr = ntohl(in.s_addr);
rs->or_port = (uint16_t) tor_parse_long(tok->args[6+offset],
10,0,65535,NULL,NULL);
rs->dir_port = (uint16_t) tor_parse_long(tok->args[7+offset],
10,0,65535,NULL,NULL);
{
smartlist_t *a_lines = find_all_by_keyword(tokens, K_A);
if (a_lines) {
find_single_ipv6_orport(a_lines, &rs->ipv6_addr, &rs->ipv6_orport);
smartlist_free(a_lines);
}
}
tok = find_opt_by_keyword(tokens, K_S);
if (tok && vote) {
int i;
vote_rs->flags = 0;
for (i=0; i < tok->n_args; ++i) {
int p = smartlist_string_pos(vote->known_flags, tok->args[i]);
if (p >= 0) {
        vote_rs->flags |= (U64_LITERAL(1)<<p);
      } else {
        log_warn(LD_DIR, "Flags line had a flag %s not listed in known_flags.",
                 escaped(tok->args[i]));
        goto err;
      }
}
} else if (tok) {
/* This is a consensus, not a vote. */
int i;
for (i=0; i < tok->n_args; ++i) {
if (!strcmp(tok->args[i], "Exit"))
rs->is_exit = 1;
else if (!strcmp(tok->args[i], "Stable"))
rs->is_stable = 1;
else if (!strcmp(tok->args[i], "Fast"))
rs->is_fast = 1;
else if (!strcmp(tok->args[i], "Running"))
rs->is_flagged_running = 1;
else if (!strcmp(tok->args[i], "Named"))
rs->is_named = 1;
else if (!strcmp(tok->args[i], "Valid"))
rs->is_valid = 1;
else if (!strcmp(tok->args[i], "Guard"))
rs->is_possible_guard = 1;
else if (!strcmp(tok->args[i], "BadExit"))
rs->is_bad_exit = 1;
else if (!strcmp(tok->args[i], "Authority"))
rs->is_authority = 1;
else if (!strcmp(tok->args[i], "Unnamed") &&
consensus_method >= 2) {
/* Unnamed is computed right by consensus method 2 and later. */
rs->is_unnamed = 1;
} else if (!strcmp(tok->args[i], "HSDir")) {
rs->is_hs_dir = 1;
} else if (!strcmp(tok->args[i], "V2Dir")) {
rs->is_v2_dir = 1;
}
}
/* These are implied true by having been included in a consensus made
* with a given method */
rs->is_flagged_running = 1; /* Starting with consensus method 4. */
rs->is_valid = 1; /* Starting with consensus method 24. */
}
{
const char *protocols = NULL, *version = NULL;
if ((tok = find_opt_by_keyword(tokens, K_PROTO))) {
tor_assert(tok->n_args == 1);
protocols = tok->args[0];
}
if ((tok = find_opt_by_keyword(tokens, K_V))) {
tor_assert(tok->n_args == 1);
version = tok->args[0];
if (vote_rs) {
vote_rs->version = tor_strdup(tok->args[0]);
}
}
summarize_protover_flags(&rs->pv, protocols, version);
}
/* handle weighting/bandwidth info */
if ((tok = find_opt_by_keyword(tokens, K_W))) {
int i;
for (i=0; i < tok->n_args; ++i) {
if (!strcmpstart(tok->args[i], "Bandwidth=")) {
int ok;
rs->bandwidth_kb =
(uint32_t)tor_parse_ulong(strchr(tok->args[i], '=')+1,
10, 0, UINT32_MAX,
&ok, NULL);
if (!ok) {
log_warn(LD_DIR, "Invalid Bandwidth %s", escaped(tok->args[i]));
goto err;
}
rs->has_bandwidth = 1;
} else if (!strcmpstart(tok->args[i], "Measured=") && vote_rs) {
int ok;
vote_rs->measured_bw_kb =
(uint32_t)tor_parse_ulong(strchr(tok->args[i], '=')+1,
10, 0, UINT32_MAX, &ok, NULL);
if (!ok) {
log_warn(LD_DIR, "Invalid Measured Bandwidth %s",
escaped(tok->args[i]));
goto err;
}
vote_rs->has_measured_bw = 1;
vote->has_measured_bws = 1;
} else if (!strcmpstart(tok->args[i], "Unmeasured=1")) {
rs->bw_is_unmeasured = 1;
} else if (!strcmpstart(tok->args[i], "GuardFraction=")) {
if (routerstatus_parse_guardfraction(tok->args[i],
vote, vote_rs, rs) < 0) {
goto err;
}
}
}
}
/* parse exit policy summaries */
if ((tok = find_opt_by_keyword(tokens, K_P))) {
tor_assert(tok->n_args == 1);
if (strcmpstart(tok->args[0], "accept ") &&
strcmpstart(tok->args[0], "reject ")) {
log_warn(LD_DIR, "Unknown exit policy summary type %s.",
escaped(tok->args[0]));
goto err;
}
    /* XXX weasel: parse this into ports and represent them in some smarter
     * way, maybe not here but somewhere else, once we need it for the client.
     * We should still parse it here to check that it's valid, though.
     */
rs->exitsummary = tor_strdup(tok->args[0]);
rs->has_exitsummary = 1;
}
if (vote_rs) {
SMARTLIST_FOREACH_BEGIN(tokens, directory_token_t *, t) {
if (t->tp == K_M && t->n_args) {
vote_microdesc_hash_t *line =
tor_malloc(sizeof(vote_microdesc_hash_t));
line->next = vote_rs->microdesc;
line->microdesc_hash_line = tor_strdup(t->args[0]);
vote_rs->microdesc = line;
}
if (t->tp == K_ID) {
tor_assert(t->n_args >= 2);
if (!strcmp(t->args[0], "ed25519")) {
vote_rs->has_ed25519_listing = 1;
if (strcmp(t->args[1], "none") &&
digest256_from_base64((char*)vote_rs->ed25519_id,
t->args[1])<0) {
log_warn(LD_DIR, "Bogus ed25519 key in networkstatus vote");
goto err;
}
}
}
if (t->tp == K_PROTO) {
tor_assert(t->n_args == 1);
vote_rs->protocols = tor_strdup(t->args[0]);
}
} SMARTLIST_FOREACH_END(t);
} else if (flav == FLAV_MICRODESC) {
tok = find_opt_by_keyword(tokens, K_M);
if (tok) {
tor_assert(tok->n_args);
if (digest256_from_base64(rs->descriptor_digest, tok->args[0])) {
log_warn(LD_DIR, "Error decoding microdescriptor digest %s",
escaped(tok->args[0]));
goto err;
}
} else {
log_info(LD_BUG, "Found an entry in networkstatus with no "
"microdescriptor digest. (Router %s ($%s) at %s:%d.)",
rs->nickname, hex_str(rs->identity_digest, DIGEST_LEN),
fmt_addr32(rs->addr), rs->or_port);
}
}
if (!strcasecmp(rs->nickname, UNNAMED_ROUTER_NICKNAME))
rs->is_named = 0;
goto done;
err:
dump_desc(s_dup, "routerstatus entry");
if (rs && !vote_rs)
routerstatus_free(rs);
rs = NULL;
done:
SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t));
smartlist_clear(tokens);
if (area) {
DUMP_AREA(area, "routerstatus entry");
memarea_clear(area);
}
*s = eos;
return rs;
}
int
compare_vote_routerstatus_entries(const void **_a, const void **_b)
{
const vote_routerstatus_t *a = *_a, *b = *_b;
return fast_memcmp(a->status.identity_digest, b->status.identity_digest,
DIGEST_LEN);
}
/** Verify the bandwidth weights of a network status document */
int
networkstatus_verify_bw_weights(networkstatus_t *ns, int consensus_method)
{
int64_t G=0, M=0, E=0, D=0, T=0;
double Wgg, Wgm, Wgd, Wmg, Wmm, Wme, Wmd, Weg, Wem, Wee, Wed;
double Gtotal=0, Mtotal=0, Etotal=0;
const char *casename = NULL;
int valid = 1;
(void) consensus_method;
const int64_t weight_scale = networkstatus_get_weight_scale_param(ns);
tor_assert(weight_scale >= 1);
Wgg = networkstatus_get_bw_weight(ns, "Wgg", -1);
Wgm = networkstatus_get_bw_weight(ns, "Wgm", -1);
Wgd = networkstatus_get_bw_weight(ns, "Wgd", -1);
Wmg = networkstatus_get_bw_weight(ns, "Wmg", -1);
Wmm = networkstatus_get_bw_weight(ns, "Wmm", -1);
Wme = networkstatus_get_bw_weight(ns, "Wme", -1);
Wmd = networkstatus_get_bw_weight(ns, "Wmd", -1);
Weg = networkstatus_get_bw_weight(ns, "Weg", -1);
Wem = networkstatus_get_bw_weight(ns, "Wem", -1);
Wee = networkstatus_get_bw_weight(ns, "Wee", -1);
Wed = networkstatus_get_bw_weight(ns, "Wed", -1);
if (Wgg<0 || Wgm<0 || Wgd<0 || Wmg<0 || Wmm<0 || Wme<0 || Wmd<0 || Weg<0
|| Wem<0 || Wee<0 || Wed<0) {
log_warn(LD_BUG, "No bandwidth weights produced in consensus!");
return 0;
}
// First, sanity check basic summing properties that hold for all cases
// We use > 1 as the check for these because they are computed as integers.
// Sometimes there are rounding errors.
if (fabs(Wmm - weight_scale) > 1) {
log_warn(LD_BUG, "Wmm=%f != "I64_FORMAT,
Wmm, I64_PRINTF_ARG(weight_scale));
valid = 0;
}
if (fabs(Wem - Wee) > 1) {
log_warn(LD_BUG, "Wem=%f != Wee=%f", Wem, Wee);
valid = 0;
}
if (fabs(Wgm - Wgg) > 1) {
log_warn(LD_BUG, "Wgm=%f != Wgg=%f", Wgm, Wgg);
valid = 0;
}
if (fabs(Weg - Wed) > 1) {
log_warn(LD_BUG, "Wed=%f != Weg=%f", Wed, Weg);
valid = 0;
}
if (fabs(Wgg + Wmg - weight_scale) > 0.001*weight_scale) {
log_warn(LD_BUG, "Wgg=%f != "I64_FORMAT" - Wmg=%f", Wgg,
I64_PRINTF_ARG(weight_scale), Wmg);
valid = 0;
}
if (fabs(Wee + Wme - weight_scale) > 0.001*weight_scale) {
log_warn(LD_BUG, "Wee=%f != "I64_FORMAT" - Wme=%f", Wee,
I64_PRINTF_ARG(weight_scale), Wme);
valid = 0;
}
if (fabs(Wgd + Wmd + Wed - weight_scale) > 0.001*weight_scale) {
log_warn(LD_BUG, "Wgd=%f + Wmd=%f + Wed=%f != "I64_FORMAT,
Wgd, Wmd, Wed, I64_PRINTF_ARG(weight_scale));
valid = 0;
}
Wgg /= weight_scale;
Wgm /= weight_scale; (void) Wgm; // unused from here on.
Wgd /= weight_scale;
Wmg /= weight_scale;
Wmm /= weight_scale;
Wme /= weight_scale;
Wmd /= weight_scale;
Weg /= weight_scale; (void) Weg; // unused from here on.
Wem /= weight_scale; (void) Wem; // unused from here on.
Wee /= weight_scale;
Wed /= weight_scale;
// Then, gather G, M, E, D, T to determine case
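  // G = guard-only bandwidth, M = middle-only, E = exit-only,
  // D = dual (guard+exit), T = total; all summed in kilobytes below.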
SMARTLIST_FOREACH_BEGIN(ns->routerstatus_list, routerstatus_t *, rs) {
int is_exit = 0;
/* Bug #2203: Don't count bad exits as exits for balancing */
is_exit = rs->is_exit && !rs->is_bad_exit;
if (rs->has_bandwidth) {
T += rs->bandwidth_kb;
if (is_exit && rs->is_possible_guard) {
D += rs->bandwidth_kb;
Gtotal += Wgd*rs->bandwidth_kb;
Mtotal += Wmd*rs->bandwidth_kb;
Etotal += Wed*rs->bandwidth_kb;
} else if (is_exit) {
E += rs->bandwidth_kb;
Mtotal += Wme*rs->bandwidth_kb;
Etotal += Wee*rs->bandwidth_kb;
} else if (rs->is_possible_guard) {
G += rs->bandwidth_kb;
Gtotal += Wgg*rs->bandwidth_kb;
Mtotal += Wmg*rs->bandwidth_kb;
} else {
M += rs->bandwidth_kb;
Mtotal += Wmm*rs->bandwidth_kb;
}
} else {
log_warn(LD_BUG, "Missing consensus bandwidth for router %s",
routerstatus_describe(rs));
}
} SMARTLIST_FOREACH_END(rs);
// Finally, check equality conditions depending upon case 1, 2 or 3
// Full equality cases: 1, 3b
// Partial equality cases: 2b (E=G), 3a (M=E)
// Fully unknown: 2a
if (3*E >= T && 3*G >= T) {
// Case 1: Neither are scarce
casename = "Case 1";
if (fabs(Etotal-Mtotal) > 0.01*MAX(Etotal,Mtotal)) {
log_warn(LD_DIR,
"Bw Weight Failure for %s: Etotal %f != Mtotal %f. "
"G="I64_FORMAT" M="I64_FORMAT" E="I64_FORMAT" D="I64_FORMAT
" T="I64_FORMAT". "
"Wgg=%f Wgd=%f Wmg=%f Wme=%f Wmd=%f Wee=%f Wed=%f",
casename, Etotal, Mtotal,
I64_PRINTF_ARG(G), I64_PRINTF_ARG(M), I64_PRINTF_ARG(E),
I64_PRINTF_ARG(D), I64_PRINTF_ARG(T),
Wgg, Wgd, Wmg, Wme, Wmd, Wee, Wed);
valid = 0;
}
if (fabs(Etotal-Gtotal) > 0.01*MAX(Etotal,Gtotal)) {
log_warn(LD_DIR,
"Bw Weight Failure for %s: Etotal %f != Gtotal %f. "
"G="I64_FORMAT" M="I64_FORMAT" E="I64_FORMAT" D="I64_FORMAT
" T="I64_FORMAT". "
"Wgg=%f Wgd=%f Wmg=%f Wme=%f Wmd=%f Wee=%f Wed=%f",
casename, Etotal, Gtotal,
I64_PRINTF_ARG(G), I64_PRINTF_ARG(M), I64_PRINTF_ARG(E),
I64_PRINTF_ARG(D), I64_PRINTF_ARG(T),
Wgg, Wgd, Wmg, Wme, Wmd, Wee, Wed);
valid = 0;
}
if (fabs(Gtotal-Mtotal) > 0.01*MAX(Gtotal,Mtotal)) {
log_warn(LD_DIR,
"Bw Weight Failure for %s: Mtotal %f != Gtotal %f. "
"G="I64_FORMAT" M="I64_FORMAT" E="I64_FORMAT" D="I64_FORMAT
" T="I64_FORMAT". "
"Wgg=%f Wgd=%f Wmg=%f Wme=%f Wmd=%f Wee=%f Wed=%f",
casename, Mtotal, Gtotal,
I64_PRINTF_ARG(G), I64_PRINTF_ARG(M), I64_PRINTF_ARG(E),
I64_PRINTF_ARG(D), I64_PRINTF_ARG(T),
Wgg, Wgd, Wmg, Wme, Wmd, Wee, Wed);
valid = 0;
}
} else if (3*E < T && 3*G < T) {
int64_t R = MIN(E, G);
int64_t S = MAX(E, G);
/*
* Case 2: Both Guards and Exits are scarce
* Balance D between E and G, depending upon
* D capacity and scarcity. Devote no extra
* bandwidth to middle nodes.
*/
if (R+D < S) { // Subcase a
double Rtotal, Stotal;
if (E < G) {
Rtotal = Etotal;
Stotal = Gtotal;
} else {
Rtotal = Gtotal;
Stotal = Etotal;
}
casename = "Case 2a";
// Rtotal < Stotal
if (Rtotal > Stotal) {
log_warn(LD_DIR,
"Bw Weight Failure for %s: Rtotal %f > Stotal %f. "
"G="I64_FORMAT" M="I64_FORMAT" E="I64_FORMAT" D="I64_FORMAT
" T="I64_FORMAT". "
"Wgg=%f Wgd=%f Wmg=%f Wme=%f Wmd=%f Wee=%f Wed=%f",
casename, Rtotal, Stotal,
I64_PRINTF_ARG(G), I64_PRINTF_ARG(M), I64_PRINTF_ARG(E),
I64_PRINTF_ARG(D), I64_PRINTF_ARG(T),
Wgg, Wgd, Wmg, Wme, Wmd, Wee, Wed);
valid = 0;
}
// Rtotal < T/3
if (3*Rtotal > T) {
log_warn(LD_DIR,
"Bw Weight Failure for %s: 3*Rtotal %f > T "
I64_FORMAT". G="I64_FORMAT" M="I64_FORMAT" E="I64_FORMAT
" D="I64_FORMAT" T="I64_FORMAT". "
"Wgg=%f Wgd=%f Wmg=%f Wme=%f Wmd=%f Wee=%f Wed=%f",
casename, Rtotal*3, I64_PRINTF_ARG(T),
I64_PRINTF_ARG(G), I64_PRINTF_ARG(M), I64_PRINTF_ARG(E),
I64_PRINTF_ARG(D), I64_PRINTF_ARG(T),
Wgg, Wgd, Wmg, Wme, Wmd, Wee, Wed);
valid = 0;
}
// Stotal < T/3
if (3*Stotal > T) {
log_warn(LD_DIR,
"Bw Weight Failure for %s: 3*Stotal %f > T "
I64_FORMAT". G="I64_FORMAT" M="I64_FORMAT" E="I64_FORMAT
" D="I64_FORMAT" T="I64_FORMAT". "
"Wgg=%f Wgd=%f Wmg=%f Wme=%f Wmd=%f Wee=%f Wed=%f",
casename, Stotal*3, I64_PRINTF_ARG(T),
I64_PRINTF_ARG(G), I64_PRINTF_ARG(M), I64_PRINTF_ARG(E),
I64_PRINTF_ARG(D), I64_PRINTF_ARG(T),
Wgg, Wgd, Wmg, Wme, Wmd, Wee, Wed);
valid = 0;
}
// Mtotal > T/3
if (3*Mtotal < T) {
log_warn(LD_DIR,
"Bw Weight Failure for %s: 3*Mtotal %f < T "
I64_FORMAT". "
"G="I64_FORMAT" M="I64_FORMAT" E="I64_FORMAT" D="I64_FORMAT
" T="I64_FORMAT". "
"Wgg=%f Wgd=%f Wmg=%f Wme=%f Wmd=%f Wee=%f Wed=%f",
casename, Mtotal*3, I64_PRINTF_ARG(T),
I64_PRINTF_ARG(G), I64_PRINTF_ARG(M), I64_PRINTF_ARG(E),
I64_PRINTF_ARG(D), I64_PRINTF_ARG(T),
Wgg, Wgd, Wmg, Wme, Wmd, Wee, Wed);
valid = 0;
}
} else { // Subcase b: R+D > S
casename = "Case 2b";
/* Check the rare-M redirect case. */
if (D != 0 && 3*M < T) {
casename = "Case 2b (balanced)";
if (fabs(Etotal-Mtotal) > 0.01*MAX(Etotal,Mtotal)) {
log_warn(LD_DIR,
"Bw Weight Failure for %s: Etotal %f != Mtotal %f. "
"G="I64_FORMAT" M="I64_FORMAT" E="I64_FORMAT" D="I64_FORMAT
" T="I64_FORMAT". "
"Wgg=%f Wgd=%f Wmg=%f Wme=%f Wmd=%f Wee=%f Wed=%f",
casename, Etotal, Mtotal,
I64_PRINTF_ARG(G), I64_PRINTF_ARG(M), I64_PRINTF_ARG(E),
I64_PRINTF_ARG(D), I64_PRINTF_ARG(T),
Wgg, Wgd, Wmg, Wme, Wmd, Wee, Wed);
valid = 0;
}
if (fabs(Etotal-Gtotal) > 0.01*MAX(Etotal,Gtotal)) {
log_warn(LD_DIR,
"Bw Weight Failure for %s: Etotal %f != Gtotal %f. "
"G="I64_FORMAT" M="I64_FORMAT" E="I64_FORMAT" D="I64_FORMAT
" T="I64_FORMAT". "
"Wgg=%f Wgd=%f Wmg=%f Wme=%f Wmd=%f Wee=%f Wed=%f",
casename, Etotal, Gtotal,
I64_PRINTF_ARG(G), I64_PRINTF_ARG(M), I64_PRINTF_ARG(E),
I64_PRINTF_ARG(D), I64_PRINTF_ARG(T),
Wgg, Wgd, Wmg, Wme, Wmd, Wee, Wed);
valid = 0;
}
if (fabs(Gtotal-Mtotal) > 0.01*MAX(Gtotal,Mtotal)) {
log_warn(LD_DIR,
"Bw Weight Failure for %s: Mtotal %f != Gtotal %f. "
"G="I64_FORMAT" M="I64_FORMAT" E="I64_FORMAT" D="I64_FORMAT
" T="I64_FORMAT". "
"Wgg=%f Wgd=%f Wmg=%f Wme=%f Wmd=%f Wee=%f Wed=%f",
casename, Mtotal, Gtotal,
I64_PRINTF_ARG(G), I64_PRINTF_ARG(M), I64_PRINTF_ARG(E),
I64_PRINTF_ARG(D), I64_PRINTF_ARG(T),
Wgg, Wgd, Wmg, Wme, Wmd, Wee, Wed);
valid = 0;
}
} else {
if (fabs(Etotal-Gtotal) > 0.01*MAX(Etotal,Gtotal)) {
log_warn(LD_DIR,
"Bw Weight Failure for %s: Etotal %f != Gtotal %f. "
"G="I64_FORMAT" M="I64_FORMAT" E="I64_FORMAT" D="I64_FORMAT
" T="I64_FORMAT". "
"Wgg=%f Wgd=%f Wmg=%f Wme=%f Wmd=%f Wee=%f Wed=%f",
casename, Etotal, Gtotal,
I64_PRINTF_ARG(G), I64_PRINTF_ARG(M), I64_PRINTF_ARG(E),
I64_PRINTF_ARG(D), I64_PRINTF_ARG(T),
Wgg, Wgd, Wmg, Wme, Wmd, Wee, Wed);
valid = 0;
}
}
}
} else { // if (E < T/3 || G < T/3) {
int64_t S = MIN(E, G);
int64_t NS = MAX(E, G);
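    // S is the bandwidth of the scarce class (the smaller of E and G);
    // NS is the bandwidth of the non-scarce one.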
if (3*(S+D) < T) { // Subcase a:
double Stotal;
double NStotal;
if (G < E) {
casename = "Case 3a (G scarce)";
Stotal = Gtotal;
NStotal = Etotal;
} else { // if (G >= E) {
casename = "Case 3a (E scarce)";
NStotal = Gtotal;
Stotal = Etotal;
}
// Stotal < T/3
if (3*Stotal > T) {
log_warn(LD_DIR,
"Bw Weight Failure for %s: 3*Stotal %f > T "
I64_FORMAT". G="I64_FORMAT" M="I64_FORMAT" E="I64_FORMAT
" D="I64_FORMAT" T="I64_FORMAT". "
"Wgg=%f Wgd=%f Wmg=%f Wme=%f Wmd=%f Wee=%f Wed=%f",
casename, Stotal*3, I64_PRINTF_ARG(T),
I64_PRINTF_ARG(G), I64_PRINTF_ARG(M), I64_PRINTF_ARG(E),
I64_PRINTF_ARG(D), I64_PRINTF_ARG(T),
Wgg, Wgd, Wmg, Wme, Wmd, Wee, Wed);
valid = 0;
}
if (NS >= M) {
if (fabs(NStotal-Mtotal) > 0.01*MAX(NStotal,Mtotal)) {
log_warn(LD_DIR,
"Bw Weight Failure for %s: NStotal %f != Mtotal %f. "
"G="I64_FORMAT" M="I64_FORMAT" E="I64_FORMAT" D="I64_FORMAT
" T="I64_FORMAT". "
"Wgg=%f Wgd=%f Wmg=%f Wme=%f Wmd=%f Wee=%f Wed=%f",
casename, NStotal, Mtotal,
I64_PRINTF_ARG(G), I64_PRINTF_ARG(M), I64_PRINTF_ARG(E),
I64_PRINTF_ARG(D), I64_PRINTF_ARG(T),
Wgg, Wgd, Wmg, Wme, Wmd, Wee, Wed);
valid = 0;
}
} else {
// if NS < M, NStotal > T/3 because only one of G or E is scarce
if (3*NStotal < T) {
log_warn(LD_DIR,
"Bw Weight Failure for %s: 3*NStotal %f < T "
I64_FORMAT". G="I64_FORMAT" M="I64_FORMAT
" E="I64_FORMAT" D="I64_FORMAT" T="I64_FORMAT". "
"Wgg=%f Wgd=%f Wmg=%f Wme=%f Wmd=%f Wee=%f Wed=%f",
casename, NStotal*3, I64_PRINTF_ARG(T),
I64_PRINTF_ARG(G), I64_PRINTF_ARG(M), I64_PRINTF_ARG(E),
I64_PRINTF_ARG(D), I64_PRINTF_ARG(T),
Wgg, Wgd, Wmg, Wme, Wmd, Wee, Wed);
valid = 0;
}
}
} else { // Subcase b: S+D >= T/3
casename = "Case 3b";
if (fabs(Etotal-Mtotal) > 0.01*MAX(Etotal,Mtotal)) {
log_warn(LD_DIR,
"Bw Weight Failure for %s: Etotal %f != Mtotal %f. "
"G="I64_FORMAT" M="I64_FORMAT" E="I64_FORMAT" D="I64_FORMAT
" T="I64_FORMAT". "
"Wgg=%f Wgd=%f Wmg=%f Wme=%f Wmd=%f Wee=%f Wed=%f",
casename, Etotal, Mtotal,
I64_PRINTF_ARG(G), I64_PRINTF_ARG(M), I64_PRINTF_ARG(E),
I64_PRINTF_ARG(D), I64_PRINTF_ARG(T),
Wgg, Wgd, Wmg, Wme, Wmd, Wee, Wed);
valid = 0;
}
if (fabs(Etotal-Gtotal) > 0.01*MAX(Etotal,Gtotal)) {
log_warn(LD_DIR,
"Bw Weight Failure for %s: Etotal %f != Gtotal %f. "
"G="I64_FORMAT" M="I64_FORMAT" E="I64_FORMAT" D="I64_FORMAT
" T="I64_FORMAT". "
"Wgg=%f Wgd=%f Wmg=%f Wme=%f Wmd=%f Wee=%f Wed=%f",
casename, Etotal, Gtotal,
I64_PRINTF_ARG(G), I64_PRINTF_ARG(M), I64_PRINTF_ARG(E),
I64_PRINTF_ARG(D), I64_PRINTF_ARG(T),
Wgg, Wgd, Wmg, Wme, Wmd, Wee, Wed);
valid = 0;
}
if (fabs(Gtotal-Mtotal) > 0.01*MAX(Gtotal,Mtotal)) {
log_warn(LD_DIR,
"Bw Weight Failure for %s: Mtotal %f != Gtotal %f. "
"G="I64_FORMAT" M="I64_FORMAT" E="I64_FORMAT" D="I64_FORMAT
" T="I64_FORMAT". "
"Wgg=%f Wgd=%f Wmg=%f Wme=%f Wmd=%f Wee=%f Wed=%f",
casename, Mtotal, Gtotal,
I64_PRINTF_ARG(G), I64_PRINTF_ARG(M), I64_PRINTF_ARG(E),
I64_PRINTF_ARG(D), I64_PRINTF_ARG(T),
Wgg, Wgd, Wmg, Wme, Wmd, Wee, Wed);
valid = 0;
}
}
}
if (valid)
log_notice(LD_DIR, "Bandwidth-weight %s is verified and valid.",
casename);
return valid;
}
/** Check if a shared random value of type srv_type is present in
 * tokens. If there is one, parse it and store it in srv_out. Return
 * -1 on failure, 0 on success. The resulting srv is allocated on the heap and
 * it is the caller's responsibility to free it. */
static int
extract_one_srv(smartlist_t *tokens, directory_keyword srv_type,
sr_srv_t **srv_out)
{
int ret = -1;
directory_token_t *tok;
sr_srv_t *srv = NULL;
smartlist_t *chunks;
tor_assert(tokens);
chunks = smartlist_new();
tok = find_opt_by_keyword(tokens, srv_type);
if (!tok) {
/* That's fine, no SRV is allowed. */
ret = 0;
goto end;
}
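  /* Collect the token's arguments into a list of strings, which is the form
   * sr_parse_srv() expects. */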
for (int i = 0; i < tok->n_args; i++) {
smartlist_add(chunks, tok->args[i]);
}
srv = sr_parse_srv(chunks);
if (srv == NULL) {
log_warn(LD_DIR, "SR: Unparseable SRV %s", escaped(tok->object_body));
goto end;
}
/* All is good. */
*srv_out = srv;
ret = 0;
end:
smartlist_free(chunks);
return ret;
}
/** Extract any shared random values found in tokens and place them in
* the networkstatus ns. */
static void
extract_shared_random_srvs(networkstatus_t *ns, smartlist_t *tokens)
{
const char *voter_identity;
networkstatus_voter_info_t *voter;
tor_assert(ns);
tor_assert(tokens);
  /* By the code flow, this document can only be one of these two types. */
tor_assert(ns->type == NS_TYPE_VOTE || ns->type == NS_TYPE_CONSENSUS);
if (ns->type == NS_TYPE_VOTE) {
voter = smartlist_get(ns->voters, 0);
tor_assert(voter);
voter_identity = hex_str(voter->identity_digest,
sizeof(voter->identity_digest));
} else {
/* Consensus has multiple voters so no specific voter. */
voter_identity = "consensus";
}
  /* We extract both; a parsing failure means the shared random value(s) in
   * the document are malformed, but we still try to salvage what we can. */
if (extract_one_srv(tokens, K_PREVIOUS_SRV, &ns->sr_info.previous_srv) < 0) {
log_warn(LD_DIR, "SR: Unable to parse previous SRV from %s",
voter_identity);
/* Maybe we have a chance with the current SRV so let's try it anyway. */
}
if (extract_one_srv(tokens, K_CURRENT_SRV, &ns->sr_info.current_srv) < 0) {
log_warn(LD_DIR, "SR: Unable to parse current SRV from %s",
voter_identity);
}
}
/** Parse a v3 networkstatus vote, opinion, or consensus (depending on
* ns_type), from s, and return the result. Return NULL on failure. */
networkstatus_t *
networkstatus_parse_vote_from_string(const char *s, const char **eos_out,
networkstatus_type_t ns_type)
{
smartlist_t *tokens = smartlist_new();
smartlist_t *rs_tokens = NULL, *footer_tokens = NULL;
networkstatus_voter_info_t *voter = NULL;
networkstatus_t *ns = NULL;
common_digests_t ns_digests;
uint8_t sha3_as_signed[DIGEST256_LEN];
const char *cert, *end_of_header, *end_of_footer, *s_dup = s;
directory_token_t *tok;
struct in_addr in;
int i, inorder, n_signatures = 0;
memarea_t *area = NULL, *rs_area = NULL;
consensus_flavor_t flav = FLAV_NS;
char *last_kwd=NULL;
tor_assert(s);
if (eos_out)
*eos_out = NULL;
if (router_get_networkstatus_v3_hashes(s, &ns_digests) ||
router_get_networkstatus_v3_sha3_as_signed(sha3_as_signed, s)<0) {
log_warn(LD_DIR, "Unable to compute digest of network-status");
goto err;
}
area = memarea_new();
end_of_header = find_start_of_next_routerstatus(s);
if (tokenize_string(area, s, end_of_header, tokens,
(ns_type == NS_TYPE_CONSENSUS) ?
networkstatus_consensus_token_table :
networkstatus_token_table, 0)) {
log_warn(LD_DIR, "Error tokenizing network-status header");
goto err;
}
ns = tor_malloc_zero(sizeof(networkstatus_t));
memcpy(&ns->digests, &ns_digests, sizeof(ns_digests));
memcpy(&ns->digest_sha3_as_signed, sha3_as_signed, sizeof(sha3_as_signed));
tok = find_by_keyword(tokens, K_NETWORK_STATUS_VERSION);
tor_assert(tok);
if (tok->n_args > 1) {
int flavor = networkstatus_parse_flavor_name(tok->args[1]);
if (flavor < 0) {
log_warn(LD_DIR, "Can't parse document with unknown flavor %s",
escaped(tok->args[1]));
goto err;
}
ns->flavor = flav = flavor;
}
if (flav != FLAV_NS && ns_type != NS_TYPE_CONSENSUS) {
log_warn(LD_DIR, "Flavor found on non-consensus networkstatus.");
goto err;
}
if (ns_type != NS_TYPE_CONSENSUS) {
const char *end_of_cert = NULL;
if (!(cert = strstr(s, "\ndir-key-certificate-version")))
goto err;
++cert;
ns->cert = authority_cert_parse_from_string(cert, &end_of_cert);
if (!ns->cert || !end_of_cert || end_of_cert > end_of_header)
goto err;
}
tok = find_by_keyword(tokens, K_VOTE_STATUS);
tor_assert(tok->n_args);
if (!strcmp(tok->args[0], "vote")) {
ns->type = NS_TYPE_VOTE;
} else if (!strcmp(tok->args[0], "consensus")) {
ns->type = NS_TYPE_CONSENSUS;
} else if (!strcmp(tok->args[0], "opinion")) {
ns->type = NS_TYPE_OPINION;
} else {
log_warn(LD_DIR, "Unrecognized vote status %s in network-status",
escaped(tok->args[0]));
goto err;
}
if (ns_type != ns->type) {
log_warn(LD_DIR, "Got the wrong kind of v3 networkstatus.");
goto err;
}
if (ns->type == NS_TYPE_VOTE || ns->type == NS_TYPE_OPINION) {
tok = find_by_keyword(tokens, K_PUBLISHED);
if (parse_iso_time(tok->args[0], &ns->published))
goto err;
ns->supported_methods = smartlist_new();
tok = find_opt_by_keyword(tokens, K_CONSENSUS_METHODS);
if (tok) {
for (i=0; i < tok->n_args; ++i)
smartlist_add_strdup(ns->supported_methods, tok->args[i]);
} else {
smartlist_add_strdup(ns->supported_methods, "1");
}
} else {
tok = find_opt_by_keyword(tokens, K_CONSENSUS_METHOD);
if (tok) {
int num_ok;
ns->consensus_method = (int)tor_parse_long(tok->args[0], 10, 1, INT_MAX,
&num_ok, NULL);
if (!num_ok)
goto err;
} else {
ns->consensus_method = 1;
}
}
if ((tok = find_opt_by_keyword(tokens, K_RECOMMENDED_CLIENT_PROTOCOLS)))
ns->recommended_client_protocols = tor_strdup(tok->args[0]);
if ((tok = find_opt_by_keyword(tokens, K_RECOMMENDED_RELAY_PROTOCOLS)))
ns->recommended_relay_protocols = tor_strdup(tok->args[0]);
if ((tok = find_opt_by_keyword(tokens, K_REQUIRED_CLIENT_PROTOCOLS)))
ns->required_client_protocols = tor_strdup(tok->args[0]);
if ((tok = find_opt_by_keyword(tokens, K_REQUIRED_RELAY_PROTOCOLS)))
ns->required_relay_protocols = tor_strdup(tok->args[0]);
tok = find_by_keyword(tokens, K_VALID_AFTER);
if (parse_iso_time(tok->args[0], &ns->valid_after))
goto err;
tok = find_by_keyword(tokens, K_FRESH_UNTIL);
if (parse_iso_time(tok->args[0], &ns->fresh_until))
goto err;
tok = find_by_keyword(tokens, K_VALID_UNTIL);
if (parse_iso_time(tok->args[0], &ns->valid_until))
goto err;
tok = find_by_keyword(tokens, K_VOTING_DELAY);
tor_assert(tok->n_args >= 2);
{
int ok;
ns->vote_seconds =
(int) tor_parse_long(tok->args[0], 10, 0, INT_MAX, &ok, NULL);
if (!ok)
goto err;
ns->dist_seconds =
(int) tor_parse_long(tok->args[1], 10, 0, INT_MAX, &ok, NULL);
if (!ok)
goto err;
}
if (ns->valid_after +
(get_options()->TestingTorNetwork ?
MIN_VOTE_INTERVAL_TESTING : MIN_VOTE_INTERVAL) > ns->fresh_until) {
log_warn(LD_DIR, "Vote/consensus freshness interval is too short");
goto err;
}
if (ns->valid_after +
(get_options()->TestingTorNetwork ?
MIN_VOTE_INTERVAL_TESTING : MIN_VOTE_INTERVAL)*2 > ns->valid_until) {
log_warn(LD_DIR, "Vote/consensus liveness interval is too short");
goto err;
}
if (ns->vote_seconds < MIN_VOTE_SECONDS) {
log_warn(LD_DIR, "Vote seconds is too short");
goto err;
}
if (ns->dist_seconds < MIN_DIST_SECONDS) {
log_warn(LD_DIR, "Dist seconds is too short");
goto err;
}
if ((tok = find_opt_by_keyword(tokens, K_CLIENT_VERSIONS))) {
ns->client_versions = tor_strdup(tok->args[0]);
}
if ((tok = find_opt_by_keyword(tokens, K_SERVER_VERSIONS))) {
ns->server_versions = tor_strdup(tok->args[0]);
}
{
smartlist_t *package_lst = find_all_by_keyword(tokens, K_PACKAGE);
ns->package_lines = smartlist_new();
if (package_lst) {
SMARTLIST_FOREACH(package_lst, directory_token_t *, t,
smartlist_add_strdup(ns->package_lines, t->args[0]));
}
smartlist_free(package_lst);
}
tok = find_by_keyword(tokens, K_KNOWN_FLAGS);
ns->known_flags = smartlist_new();
inorder = 1;
for (i = 0; i < tok->n_args; ++i) {
smartlist_add_strdup(ns->known_flags, tok->args[i]);
if (i>0 && strcmp(tok->args[i-1], tok->args[i])>= 0) {
log_warn(LD_DIR, "%s >= %s", tok->args[i-1], tok->args[i]);
inorder = 0;
}
}
if (!inorder) {
log_warn(LD_DIR, "known-flags not in order");
goto err;
}
if (ns->type != NS_TYPE_CONSENSUS &&
smartlist_len(ns->known_flags) > MAX_KNOWN_FLAGS_IN_VOTE) {
    /* If we allowed more than 64 flags in votes, then parsing them would make
     * us invoke undefined behavior whenever we used 1<<flagnum to compute a
     * flag's bit in a 64-bit flag field. */
    log_warn(LD_DIR, "Too many known flags in consensus vote or opinion");
    goto err;
  }

  tok = find_opt_by_keyword(tokens, K_PARAMS);
  if (tok) {
    int any_dups = 0;
    inorder = 1;
    ns->net_params = smartlist_new();
for (i = 0; i < tok->n_args; ++i) {
int ok=0;
char *eq = strchr(tok->args[i], '=');
size_t eq_pos;
if (!eq) {
log_warn(LD_DIR, "Bad element '%s' in params", escaped(tok->args[i]));
goto err;
}
eq_pos = eq-tok->args[i];
tor_parse_long(eq+1, 10, INT32_MIN, INT32_MAX, &ok, NULL);
if (!ok) {
log_warn(LD_DIR, "Bad element '%s' in params", escaped(tok->args[i]));
goto err;
}
if (i > 0 && strcmp(tok->args[i-1], tok->args[i]) >= 0) {
log_warn(LD_DIR, "%s >= %s", tok->args[i-1], tok->args[i]);
inorder = 0;
}
if (last_kwd && eq_pos == strlen(last_kwd) &&
fast_memeq(last_kwd, tok->args[i], eq_pos)) {
log_warn(LD_DIR, "Duplicate value for %s parameter",
escaped(tok->args[i]));
any_dups = 1;
}
tor_free(last_kwd);
last_kwd = tor_strndup(tok->args[i], eq_pos);
smartlist_add_strdup(ns->net_params, tok->args[i]);
}
if (!inorder) {
log_warn(LD_DIR, "params not in order");
goto err;
}
if (any_dups) {
log_warn(LD_DIR, "Duplicate in parameters");
goto err;
}
}
ns->voters = smartlist_new();
SMARTLIST_FOREACH_BEGIN(tokens, directory_token_t *, _tok) {
tok = _tok;
if (tok->tp == K_DIR_SOURCE) {
tor_assert(tok->n_args >= 6);
if (voter)
smartlist_add(ns->voters, voter);
voter = tor_malloc_zero(sizeof(networkstatus_voter_info_t));
voter->sigs = smartlist_new();
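      /* For a vote or opinion, the voter's vote digest is simply this
       * document's own SHA1 digest. */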
if (ns->type != NS_TYPE_CONSENSUS)
memcpy(voter->vote_digest, ns_digests.d[DIGEST_SHA1], DIGEST_LEN);
voter->nickname = tor_strdup(tok->args[0]);
if (strlen(tok->args[1]) != HEX_DIGEST_LEN ||
base16_decode(voter->identity_digest, sizeof(voter->identity_digest),
tok->args[1], HEX_DIGEST_LEN)
!= sizeof(voter->identity_digest)) {
log_warn(LD_DIR, "Error decoding identity digest %s in "
"network-status document.", escaped(tok->args[1]));
goto err;
}
if (ns->type != NS_TYPE_CONSENSUS &&
tor_memneq(ns->cert->cache_info.identity_digest,
voter->identity_digest, DIGEST_LEN)) {
log_warn(LD_DIR,"Mismatch between identities in certificate and vote");
goto err;
}
if (ns->type != NS_TYPE_CONSENSUS) {
if (authority_cert_is_blacklisted(ns->cert)) {
log_warn(LD_DIR, "Rejecting vote signature made with blacklisted "
"signing key %s",
hex_str(ns->cert->signing_key_digest, DIGEST_LEN));
goto err;
}
}
voter->address = tor_strdup(tok->args[2]);
if (!tor_inet_aton(tok->args[3], &in)) {
log_warn(LD_DIR, "Error decoding IP address %s in network-status.",
escaped(tok->args[3]));
goto err;
}
voter->addr = ntohl(in.s_addr);
int ok;
voter->dir_port = (uint16_t)
tor_parse_long(tok->args[4], 10, 0, 65535, &ok, NULL);
if (!ok)
goto err;
voter->or_port = (uint16_t)
tor_parse_long(tok->args[5], 10, 0, 65535, &ok, NULL);
if (!ok)
goto err;
} else if (tok->tp == K_CONTACT) {
if (!voter || voter->contact) {
log_warn(LD_DIR, "contact element is out of place.");
goto err;
}
voter->contact = tor_strdup(tok->args[0]);
} else if (tok->tp == K_VOTE_DIGEST) {
tor_assert(ns->type == NS_TYPE_CONSENSUS);
tor_assert(tok->n_args >= 1);
if (!voter || ! tor_digest_is_zero(voter->vote_digest)) {
log_warn(LD_DIR, "vote-digest element is out of place.");
goto err;
}
if (strlen(tok->args[0]) != HEX_DIGEST_LEN ||
base16_decode(voter->vote_digest, sizeof(voter->vote_digest),
tok->args[0], HEX_DIGEST_LEN)
!= sizeof(voter->vote_digest)) {
log_warn(LD_DIR, "Error decoding vote digest %s in "
"network-status consensus.", escaped(tok->args[0]));
goto err;
}
}
} SMARTLIST_FOREACH_END(_tok);
if (voter) {
smartlist_add(ns->voters, voter);
voter = NULL;
}
if (smartlist_len(ns->voters) == 0) {
log_warn(LD_DIR, "Missing dir-source elements in a networkstatus.");
goto err;
} else if (ns->type != NS_TYPE_CONSENSUS && smartlist_len(ns->voters) != 1) {
log_warn(LD_DIR, "Too many dir-source elements in a vote networkstatus.");
goto err;
}
if (ns->type != NS_TYPE_CONSENSUS &&
(tok = find_opt_by_keyword(tokens, K_LEGACY_DIR_KEY))) {
int bad = 1;
if (strlen(tok->args[0]) == HEX_DIGEST_LEN) {
networkstatus_voter_info_t *voter_0 = smartlist_get(ns->voters, 0);
if (base16_decode(voter_0->legacy_id_digest, DIGEST_LEN,
tok->args[0], HEX_DIGEST_LEN) != DIGEST_LEN)
bad = 1;
else
bad = 0;
}
if (bad) {
log_warn(LD_DIR, "Invalid legacy key digest %s on vote.",
escaped(tok->args[0]));
}
}
/* If this is a vote document, check if information about the shared
randomness protocol is included, and extract it. */
if (ns->type == NS_TYPE_VOTE) {
dirvote_parse_sr_commits(ns, tokens);
}
/* For both a vote and consensus, extract the shared random values. */
if (ns->type == NS_TYPE_VOTE || ns->type == NS_TYPE_CONSENSUS) {
extract_shared_random_srvs(ns, tokens);
}
/* Parse routerstatus lines. */
rs_tokens = smartlist_new();
rs_area = memarea_new();
s = end_of_header;
ns->routerstatus_list = smartlist_new();
while (!strcmpstart(s, "r ")) {
if (ns->type != NS_TYPE_CONSENSUS) {
vote_routerstatus_t *rs = tor_malloc_zero(sizeof(vote_routerstatus_t));
if (routerstatus_parse_entry_from_string(rs_area, &s, rs_tokens, ns,
rs, 0, 0)) {
smartlist_add(ns->routerstatus_list, rs);
} else {
vote_routerstatus_free(rs);
}
} else {
routerstatus_t *rs;
if ((rs = routerstatus_parse_entry_from_string(rs_area, &s, rs_tokens,
NULL, NULL,
ns->consensus_method,
flav))) {
/* Use exponential-backoff scheduling when downloading microdescs */
smartlist_add(ns->routerstatus_list, rs);
}
}
}
for (i = 1; i < smartlist_len(ns->routerstatus_list); ++i) {
routerstatus_t *rs1, *rs2;
if (ns->type != NS_TYPE_CONSENSUS) {
vote_routerstatus_t *a = smartlist_get(ns->routerstatus_list, i-1);
vote_routerstatus_t *b = smartlist_get(ns->routerstatus_list, i);
rs1 = &a->status; rs2 = &b->status;
} else {
rs1 = smartlist_get(ns->routerstatus_list, i-1);
rs2 = smartlist_get(ns->routerstatus_list, i);
}
if (fast_memcmp(rs1->identity_digest, rs2->identity_digest, DIGEST_LEN)
>= 0) {
log_warn(LD_DIR, "Networkstatus entries not sorted by identity digest");
goto err;
}
}
if (ns_type != NS_TYPE_CONSENSUS) {
digest256map_t *ed_id_map = digest256map_new();
SMARTLIST_FOREACH_BEGIN(ns->routerstatus_list, vote_routerstatus_t *,
vrs) {
if (! vrs->has_ed25519_listing ||
tor_mem_is_zero((const char *)vrs->ed25519_id, DIGEST256_LEN))
continue;
if (digest256map_get(ed_id_map, vrs->ed25519_id) != NULL) {
log_warn(LD_DIR, "Vote networkstatus ed25519 identities were not "
"unique");
digest256map_free(ed_id_map, NULL);
goto err;
}
digest256map_set(ed_id_map, vrs->ed25519_id, (void*)1);
} SMARTLIST_FOREACH_END(vrs);
digest256map_free(ed_id_map, NULL);
}
/* Parse footer; check signature. */
footer_tokens = smartlist_new();
if ((end_of_footer = strstr(s, "\nnetwork-status-version ")))
++end_of_footer;
else
end_of_footer = s + strlen(s);
if (tokenize_string(area,s, end_of_footer, footer_tokens,
networkstatus_vote_footer_token_table, 0)) {
log_warn(LD_DIR, "Error tokenizing network-status vote footer.");
goto err;
}
{
int found_sig = 0;
SMARTLIST_FOREACH_BEGIN(footer_tokens, directory_token_t *, _tok) {
tok = _tok;
if (tok->tp == K_DIRECTORY_SIGNATURE)
found_sig = 1;
else if (found_sig) {
log_warn(LD_DIR, "Extraneous token after first directory-signature");
goto err;
}
} SMARTLIST_FOREACH_END(_tok);
}
if ((tok = find_opt_by_keyword(footer_tokens, K_DIRECTORY_FOOTER))) {
if (tok != smartlist_get(footer_tokens, 0)) {
log_warn(LD_DIR, "Misplaced directory-footer token");
goto err;
}
}
tok = find_opt_by_keyword(footer_tokens, K_BW_WEIGHTS);
if (tok) {
ns->weight_params = smartlist_new();
for (i = 0; i < tok->n_args; ++i) {
int ok=0;
char *eq = strchr(tok->args[i], '=');
if (!eq) {
log_warn(LD_DIR, "Bad element '%s' in weight params",
escaped(tok->args[i]));
goto err;
}
tor_parse_long(eq+1, 10, INT32_MIN, INT32_MAX, &ok, NULL);
if (!ok) {
log_warn(LD_DIR, "Bad element '%s' in params", escaped(tok->args[i]));
goto err;
}
smartlist_add_strdup(ns->weight_params, tok->args[i]);
}
}
SMARTLIST_FOREACH_BEGIN(footer_tokens, directory_token_t *, _tok) {
char declared_identity[DIGEST_LEN];
networkstatus_voter_info_t *v;
document_signature_t *sig;
const char *id_hexdigest = NULL;
const char *sk_hexdigest = NULL;
digest_algorithm_t alg = DIGEST_SHA1;
tok = _tok;
if (tok->tp != K_DIRECTORY_SIGNATURE)
continue;
tor_assert(tok->n_args >= 2);
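    /* A directory-signature line has either two arguments (identity and
     * signing-key digests, with SHA1 implied) or three (the digest algorithm
     * name comes first). */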
if (tok->n_args == 2) {
id_hexdigest = tok->args[0];
sk_hexdigest = tok->args[1];
} else {
const char *algname = tok->args[0];
int a;
id_hexdigest = tok->args[1];
sk_hexdigest = tok->args[2];
a = crypto_digest_algorithm_parse_name(algname);
if (a<0) {
log_warn(LD_DIR, "Unknown digest algorithm %s; skipping",
escaped(algname));
continue;
}
alg = a;
}
if (!tok->object_type ||
strcmp(tok->object_type, "SIGNATURE") ||
tok->object_size < 128 || tok->object_size > 512) {
log_warn(LD_DIR, "Bad object type or length on directory-signature");
goto err;
}
if (strlen(id_hexdigest) != HEX_DIGEST_LEN ||
base16_decode(declared_identity, sizeof(declared_identity),
id_hexdigest, HEX_DIGEST_LEN)
!= sizeof(declared_identity)) {
log_warn(LD_DIR, "Error decoding declared identity %s in "
"network-status document.", escaped(id_hexdigest));
goto err;
}
if (!(v = networkstatus_get_voter_by_id(ns, declared_identity))) {
log_warn(LD_DIR, "ID on signature on network-status document does "
"not match any declared directory source.");
goto err;
}
sig = tor_malloc_zero(sizeof(document_signature_t));
memcpy(sig->identity_digest, v->identity_digest, DIGEST_LEN);
sig->alg = alg;
if (strlen(sk_hexdigest) != HEX_DIGEST_LEN ||
base16_decode(sig->signing_key_digest, sizeof(sig->signing_key_digest),
sk_hexdigest, HEX_DIGEST_LEN)
!= sizeof(sig->signing_key_digest)) {
log_warn(LD_DIR, "Error decoding declared signing key digest %s in "
"network-status document.", escaped(sk_hexdigest));
tor_free(sig);
goto err;
}
if (ns->type != NS_TYPE_CONSENSUS) {
if (tor_memneq(declared_identity, ns->cert->cache_info.identity_digest,
DIGEST_LEN)) {
log_warn(LD_DIR, "Digest mismatch between declared and actual on "
"network-status vote.");
tor_free(sig);
goto err;
}
}
if (networkstatus_get_voter_sig_by_alg(v, sig->alg)) {
/* We already parsed a vote with this algorithm from this voter. Use the
first one. */
log_fn(LOG_PROTOCOL_WARN, LD_DIR, "We received a networkstatus "
"that contains two signatures from the same voter with the same "
"algorithm. Ignoring the second signature.");
tor_free(sig);
continue;
}
if (ns->type != NS_TYPE_CONSENSUS) {
if (check_signature_token(ns_digests.d[DIGEST_SHA1], DIGEST_LEN,
tok, ns->cert->signing_key, 0,
"network-status document")) {
tor_free(sig);
goto err;
}
sig->good_signature = 1;
} else {
if (tok->object_size >= INT_MAX || tok->object_size >= SIZE_T_CEILING) {
tor_free(sig);
goto err;
}
sig->signature = tor_memdup(tok->object_body, tok->object_size);
sig->signature_len = (int) tok->object_size;
}
smartlist_add(v->sigs, sig);
++n_signatures;
} SMARTLIST_FOREACH_END(_tok);
if (! n_signatures) {
log_warn(LD_DIR, "No signatures on networkstatus document.");
goto err;
} else if (ns->type == NS_TYPE_VOTE && n_signatures != 1) {
log_warn(LD_DIR, "Received more than one signature on a "
"network-status vote.");
goto err;
}
if (eos_out)
*eos_out = end_of_footer;
goto done;
err:
dump_desc(s_dup, "v3 networkstatus");
networkstatus_vote_free(ns);
ns = NULL;
done:
if (tokens) {
SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t));
smartlist_free(tokens);
}
if (voter) {
if (voter->sigs) {
SMARTLIST_FOREACH(voter->sigs, document_signature_t *, sig,
document_signature_free(sig));
smartlist_free(voter->sigs);
}
tor_free(voter->nickname);
tor_free(voter->address);
tor_free(voter->contact);
tor_free(voter);
}
if (rs_tokens) {
SMARTLIST_FOREACH(rs_tokens, directory_token_t *, t, token_clear(t));
smartlist_free(rs_tokens);
}
if (footer_tokens) {
SMARTLIST_FOREACH(footer_tokens, directory_token_t *, t, token_clear(t));
smartlist_free(footer_tokens);
}
if (area) {
DUMP_AREA(area, "v3 networkstatus");
memarea_drop_all(area);
}
if (rs_area)
memarea_drop_all(rs_area);
tor_free(last_kwd);
return ns;
}
/** Return the common_digests_t that holds the digests of the
* flavor_name-flavored networkstatus according to the detached
* signatures document sigs, allocating a new common_digests_t as
* needed. */
static common_digests_t *
detached_get_digests(ns_detached_signatures_t *sigs, const char *flavor_name)
{
common_digests_t *d = strmap_get(sigs->digests, flavor_name);
if (!d) {
d = tor_malloc_zero(sizeof(common_digests_t));
strmap_set(sigs->digests, flavor_name, d);
}
return d;
}
/** Return the list of signatures of the flavor_name-flavored
* networkstatus according to the detached signatures document sigs,
* allocating a new signature list as needed. */
static smartlist_t *
detached_get_signatures(ns_detached_signatures_t *sigs,
const char *flavor_name)
{
smartlist_t *sl = strmap_get(sigs->signatures, flavor_name);
if (!sl) {
sl = smartlist_new();
strmap_set(sigs->signatures, flavor_name, sl);
}
return sl;
}
/** Parse a detached v3 networkstatus signature document between s and
* eos and return the result. Return NULL on failure. */
ns_detached_signatures_t *
networkstatus_parse_detached_signatures(const char *s, const char *eos)
{
/* XXXX there is too much duplicated code shared between this function and
* networkstatus_parse_vote_from_string(). */
directory_token_t *tok;
memarea_t *area = NULL;
common_digests_t *digests;
smartlist_t *tokens = smartlist_new();
ns_detached_signatures_t *sigs =
tor_malloc_zero(sizeof(ns_detached_signatures_t));
sigs->digests = strmap_new();
sigs->signatures = strmap_new();
if (!eos)
eos = s + strlen(s);
area = memarea_new();
if (tokenize_string(area,s, eos, tokens,
networkstatus_detached_signature_token_table, 0)) {
log_warn(LD_DIR, "Error tokenizing detached networkstatus signatures");
goto err;
}
/* Grab all the digest-like tokens. */
SMARTLIST_FOREACH_BEGIN(tokens, directory_token_t *, _tok) {
const char *algname;
digest_algorithm_t alg;
const char *flavor;
const char *hexdigest;
size_t expected_length, digest_length;
tok = _tok;
if (tok->tp == K_CONSENSUS_DIGEST) {
algname = "sha1";
alg = DIGEST_SHA1;
flavor = "ns";
hexdigest = tok->args[0];
} else if (tok->tp == K_ADDITIONAL_DIGEST) {
int a = crypto_digest_algorithm_parse_name(tok->args[1]);
if (a<0) {
log_warn(LD_DIR, "Unrecognized algorithm name %s", tok->args[0]);
continue;
}
alg = (digest_algorithm_t) a;
flavor = tok->args[0];
algname = tok->args[1];
hexdigest = tok->args[2];
} else {
continue;
}
digest_length = crypto_digest_algorithm_get_length(alg);
expected_length = digest_length * 2; /* hex encoding */
if (strlen(hexdigest) != expected_length) {
log_warn(LD_DIR, "Wrong length on consensus-digest in detached "
"networkstatus signatures");
goto err;
}
digests = detached_get_digests(sigs, flavor);
tor_assert(digests);
if (!tor_mem_is_zero(digests->d[alg], digest_length)) {
log_warn(LD_DIR, "Multiple digests for %s with %s on detached "
"signatures document", flavor, algname);
continue;
}
if (base16_decode(digests->d[alg], digest_length,
hexdigest, strlen(hexdigest)) != (int) digest_length) {
log_warn(LD_DIR, "Bad encoding on consensus-digest in detached "
"networkstatus signatures");
goto err;
}
} SMARTLIST_FOREACH_END(_tok);
tok = find_by_keyword(tokens, K_VALID_AFTER);
if (parse_iso_time(tok->args[0], &sigs->valid_after)) {
log_warn(LD_DIR, "Bad valid-after in detached networkstatus signatures");
goto err;
}
tok = find_by_keyword(tokens, K_FRESH_UNTIL);
if (parse_iso_time(tok->args[0], &sigs->fresh_until)) {
log_warn(LD_DIR, "Bad fresh-until in detached networkstatus signatures");
goto err;
}
tok = find_by_keyword(tokens, K_VALID_UNTIL);
if (parse_iso_time(tok->args[0], &sigs->valid_until)) {
log_warn(LD_DIR, "Bad valid-until in detached networkstatus signatures");
goto err;
}
SMARTLIST_FOREACH_BEGIN(tokens, directory_token_t *, _tok) {
const char *id_hexdigest;
const char *sk_hexdigest;
const char *algname;
const char *flavor;
digest_algorithm_t alg;
char id_digest[DIGEST_LEN];
char sk_digest[DIGEST_LEN];
smartlist_t *siglist;
document_signature_t *sig;
int is_duplicate;
tok = _tok;
if (tok->tp == K_DIRECTORY_SIGNATURE) {
tor_assert(tok->n_args >= 2);
flavor = "ns";
algname = "sha1";
id_hexdigest = tok->args[0];
sk_hexdigest = tok->args[1];
} else if (tok->tp == K_ADDITIONAL_SIGNATURE) {
tor_assert(tok->n_args >= 4);
flavor = tok->args[0];
algname = tok->args[1];
id_hexdigest = tok->args[2];
sk_hexdigest = tok->args[3];
} else {
continue;
}
{
int a = crypto_digest_algorithm_parse_name(algname);
if (a<0) {
log_warn(LD_DIR, "Unrecognized algorithm name %s", algname);
continue;
}
alg = (digest_algorithm_t) a;
}
if (!tok->object_type ||
strcmp(tok->object_type, "SIGNATURE") ||
tok->object_size < 128 || tok->object_size > 512) {
log_warn(LD_DIR, "Bad object type or length on directory-signature");
goto err;
}
if (strlen(id_hexdigest) != HEX_DIGEST_LEN ||
base16_decode(id_digest, sizeof(id_digest),
id_hexdigest, HEX_DIGEST_LEN) != sizeof(id_digest)) {
log_warn(LD_DIR, "Error decoding declared identity %s in "
"network-status vote.", escaped(id_hexdigest));
goto err;
}
if (strlen(sk_hexdigest) != HEX_DIGEST_LEN ||
base16_decode(sk_digest, sizeof(sk_digest),
sk_hexdigest, HEX_DIGEST_LEN) != sizeof(sk_digest)) {
log_warn(LD_DIR, "Error decoding declared signing key digest %s in "
"network-status vote.", escaped(sk_hexdigest));
goto err;
}
siglist = detached_get_signatures(sigs, flavor);
is_duplicate = 0;
SMARTLIST_FOREACH(siglist, document_signature_t *, dsig, {
if (dsig->alg == alg &&
tor_memeq(id_digest, dsig->identity_digest, DIGEST_LEN) &&
tor_memeq(sk_digest, dsig->signing_key_digest, DIGEST_LEN)) {
is_duplicate = 1;
}
});
if (is_duplicate) {
log_warn(LD_DIR, "Two signatures with identical keys and algorithm "
"found.");
continue;
}
sig = tor_malloc_zero(sizeof(document_signature_t));
sig->alg = alg;
memcpy(sig->identity_digest, id_digest, DIGEST_LEN);
memcpy(sig->signing_key_digest, sk_digest, DIGEST_LEN);
if (tok->object_size >= INT_MAX || tok->object_size >= SIZE_T_CEILING) {
tor_free(sig);
goto err;
}
sig->signature = tor_memdup(tok->object_body, tok->object_size);
sig->signature_len = (int) tok->object_size;
smartlist_add(siglist, sig);
} SMARTLIST_FOREACH_END(_tok);
goto done;
err:
ns_detached_signatures_free(sigs);
sigs = NULL;
done:
SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t));
smartlist_free(tokens);
if (area) {
DUMP_AREA(area, "detached signatures");
memarea_drop_all(area);
}
return sigs;
}
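/* Illustrative sketch (not compiled): the kinds of lines
 * networkstatus_parse_detached_signatures() looks for, with placeholder
 * values. Keyword spellings follow the token names used above; see
 * dir-spec.txt for the authoritative format.
 *
 *   consensus-digest <hex SHA1 digest>
 *   additional-digest <flavor> <algname> <hex digest>
 *   valid-after / fresh-until / valid-until <ISO time>
 *   directory-signature <identity hex> <signing key hex>
 *     -----BEGIN SIGNATURE----- ... -----END SIGNATURE-----
 *   additional-signature <flavor> <algname> <identity hex> <signing key hex>
 *     -----BEGIN SIGNATURE----- ... -----END SIGNATURE-----
 */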
/** Parse the addr policy in the string s and return it. If
* assume_action is nonnegative, then insert its action (ADDR_POLICY_ACCEPT or
* ADDR_POLICY_REJECT) for items that specify no action.
*
* Returns NULL on policy errors.
*
* Set *malformed_list to true if the entire policy list should be
* discarded. Otherwise, set it to false, and only this item should be ignored
* on error - the rest of the policy list can continue to be processed and
* used.
*
* The addr_policy_t returned by this function can have its address set to
* AF_UNSPEC for '*'. Use policy_expand_unspec() to turn this into a pair
* of AF_INET and AF_INET6 items.
*/
MOCK_IMPL(addr_policy_t *,
router_parse_addr_policy_item_from_string,(const char *s, int assume_action,
int *malformed_list))
{
directory_token_t *tok = NULL;
const char *cp, *eos;
/* Longest possible policy is
* "accept6 [ffff:ffff:..255]/128:10000-65535",
* which contains a max-length IPv6 address, plus 26 characters.
* But note that there can be an arbitrary amount of space between the
* accept and the address:mask/port element.
* We don't need to multiply TOR_ADDR_BUF_LEN by 2, as there is only one
* IPv6 address. But making the buffer shorter might cause valid long lines
* that parsed in previous versions to fail to parse in new versions.
* (These lines would have to have excessive amounts of whitespace.) */
char line[TOR_ADDR_BUF_LEN*2 + 32];
addr_policy_t *r;
memarea_t *area = NULL;
tor_assert(malformed_list);
*malformed_list = 0;
s = eat_whitespace(s);
/* We can only do assume_action on []-quoted IPv6, as "a" (accept)
* and ":" (port separator) are ambiguous */
if ((*s == '*' || *s == '[' || TOR_ISDIGIT(*s)) && assume_action >= 0) {
if (tor_snprintf(line, sizeof(line), "%s %s",
assume_action == ADDR_POLICY_ACCEPT?"accept":"reject", s)<0) {
log_warn(LD_DIR, "Policy %s is too long.", escaped(s));
return NULL;
}
cp = line;
tor_strlower(line);
} else { /* assume an already well-formed address policy line */
cp = s;
}
eos = cp + strlen(cp);
area = memarea_new();
tok = get_next_token(area, &cp, eos, routerdesc_token_table);
if (tok->tp == ERR_) {
log_warn(LD_DIR, "Error reading address policy: %s", tok->error);
goto err;
}
if (tok->tp != K_ACCEPT && tok->tp != K_ACCEPT6 &&
tok->tp != K_REJECT && tok->tp != K_REJECT6) {
log_warn(LD_DIR, "Expected 'accept' or 'reject'.");
goto err;
}
/* Use the extended interpretation of accept/reject *,
* expanding it into an IPv4 wildcard and an IPv6 wildcard.
* Also permit *4 and *6 for IPv4 and IPv6 only wildcards. */
r = router_parse_addr_policy(tok, TAPMP_EXTENDED_STAR);
if (!r) {
goto err;
}
/* Ensure that accept6/reject6 fields are followed by IPv6 addresses.
* AF_UNSPEC addresses are only permitted on the accept/reject field type.
* Unlike descriptors, torrcs exit policy accept/reject can be followed by
* either an IPv4 or IPv6 address. */
if ((tok->tp == K_ACCEPT6 || tok->tp == K_REJECT6) &&
tor_addr_family(&r->addr) != AF_INET6) {
/* This is a non-fatal error, just ignore this one entry. */
*malformed_list = 0;
log_warn(LD_DIR, "IPv4 address '%s' with accept6/reject6 field type in "
"exit policy. Ignoring, but continuing to parse rules. (Use "
"accept/reject with IPv4 addresses.)",
tok->n_args == 1 ? tok->args[0] : "");
addr_policy_free(r);
r = NULL;
goto done;
}
goto done;
err:
*malformed_list = 1;
r = NULL;
done:
token_clear(tok);
if (area) {
DUMP_AREA(area, "policy item");
memarea_drop_all(area);
}
return r;
}
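/* Illustrative example (not compiled): a hypothetical caller parsing a single
 * policy line without assuming an action. Variable names are placeholders.
 *
 *   int malformed = 0;
 *   addr_policy_t *p =
 *     router_parse_addr_policy_item_from_string("accept 198.51.100.0/24:80",
 *                                               -1, &malformed);
 *   if (!p && malformed) {
 *     // Discard the whole policy list.
 *   } else if (!p) {
 *     // Ignore just this entry; the rest of the list is still usable.
 *   }
 */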
/** Add an exit policy stored in the token tok to the router info in
* router. Return 0 on success, -1 on failure. */
static int
router_add_exit_policy(routerinfo_t *router, directory_token_t *tok)
{
addr_policy_t *newe;
/* Use the standard interpretation of accept/reject *, an IPv4 wildcard. */
newe = router_parse_addr_policy(tok, 0);
if (!newe)
return -1;
if (! router->exit_policy)
router->exit_policy = smartlist_new();
/* Ensure that in descriptors, accept/reject fields are followed by
* IPv4 addresses, and accept6/reject6 fields are followed by
* IPv6 addresses. Unlike torrcs, descriptor exit policies do not permit
* accept/reject followed by IPv6. */
if (((tok->tp == K_ACCEPT6 || tok->tp == K_REJECT6) &&
tor_addr_family(&newe->addr) == AF_INET)
||
((tok->tp == K_ACCEPT || tok->tp == K_REJECT) &&
tor_addr_family(&newe->addr) == AF_INET6)) {
/* There's nothing the user can do about other relays' descriptors,
* so we don't provide usage advice here. */
log_warn(LD_DIR, "Mismatch between field type and address type in exit "
"policy '%s'. Discarding entire router descriptor.",
tok->n_args == 1 ? tok->args[0] : "");
addr_policy_free(newe);
return -1;
}
smartlist_add(router->exit_policy, newe);
return 0;
}
/** Given a K_ACCEPT[6] or K_REJECT[6] token, create and return
* a new addr_policy_t corresponding to the token. If TAPMP_EXTENDED_STAR
* is set in fmt_flags, K_ACCEPT6 and K_REJECT6 tokens followed by *
* expand to IPv6-only policies; otherwise they expand to IPv4 and IPv6
* policies. */
static addr_policy_t *
router_parse_addr_policy(directory_token_t *tok, unsigned fmt_flags)
{
addr_policy_t newe;
char *arg;
tor_assert(tok->tp == K_REJECT || tok->tp == K_REJECT6 ||
tok->tp == K_ACCEPT || tok->tp == K_ACCEPT6);
if (tok->n_args != 1)
return NULL;
arg = tok->args[0];
if (!strcmpstart(arg,"private"))
return router_parse_addr_policy_private(tok);
memset(&newe, 0, sizeof(newe));
if (tok->tp == K_REJECT || tok->tp == K_REJECT6)
newe.policy_type = ADDR_POLICY_REJECT;
else
newe.policy_type = ADDR_POLICY_ACCEPT;
/* accept6/reject6 * produces an IPv6 wildcard address only.
* (accept/reject * produces rules for IPv4 and IPv6 wildcard addresses.) */
if ((fmt_flags & TAPMP_EXTENDED_STAR)
&& (tok->tp == K_ACCEPT6 || tok->tp == K_REJECT6)) {
fmt_flags |= TAPMP_STAR_IPV6_ONLY;
}
if (tor_addr_parse_mask_ports(arg, fmt_flags, &newe.addr, &newe.maskbits,
&newe.prt_min, &newe.prt_max) < 0) {
log_warn(LD_DIR,"Couldn't parse line %s. Dropping", escaped(arg));
return NULL;
}
return addr_policy_get_canonical_entry(&newe);
}
/** Parse an exit policy line of the format "accept[6]/reject[6] private:...".
* This didn't exist until Tor 0.1.1.15, so nobody should generate it in
* router descriptors until earlier versions are obsolete.
*
* accept/reject and accept6/reject6 private all produce rules for both
* IPv4 and IPv6 addresses.
*/
static addr_policy_t *
router_parse_addr_policy_private(directory_token_t *tok)
{
const char *arg;
uint16_t port_min, port_max;
addr_policy_t result;
arg = tok->args[0];
if (strcmpstart(arg, "private"))
return NULL;
arg += strlen("private");
arg = (char*) eat_whitespace(arg);
if (!arg || *arg != ':')
return NULL;
if (parse_port_range(arg+1, &port_min, &port_max)<0)
return NULL;
memset(&result, 0, sizeof(result));
if (tok->tp == K_REJECT || tok->tp == K_REJECT6)
result.policy_type = ADDR_POLICY_REJECT;
else
result.policy_type = ADDR_POLICY_ACCEPT;
result.is_private = 1;
result.prt_min = port_min;
result.prt_max = port_max;
if (tok->tp == K_ACCEPT6 || tok->tp == K_REJECT6) {
log_warn(LD_GENERAL,
"'%s' expands into rules which apply to all private IPv4 and "
"IPv6 addresses. (Use accept/reject private:* for IPv4 and "
"IPv6.)", tok->n_args == 1 ? tok->args[0] : "");
}
return addr_policy_get_canonical_entry(&result);
}
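/* Illustrative example (not compiled): lines router_parse_addr_policy_private()
 * accepts, matching the format described above.
 *
 *   "reject private:*"       -> reject all private addresses on all ports
 *   "accept private:80-443"  -> accept private addresses on ports 80-443
 *
 * Either way the resulting rule covers both IPv4 and IPv6 private ranges,
 * as noted in the comment above. */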
/** Log and exit (via assertion) if any policy in lst is malformed. */
void
assert_addr_policy_ok(smartlist_t *lst)
{
if (!lst) return;
SMARTLIST_FOREACH(lst, addr_policy_t *, t, {
tor_assert(t->policy_type == ADDR_POLICY_REJECT ||
t->policy_type == ADDR_POLICY_ACCEPT);
tor_assert(t->prt_min <= t->prt_max);
});
}
/** Return a newly allocated smartlist of all accept or reject tokens in
* s.
*/
static smartlist_t *
find_all_exitpolicy(smartlist_t *s)
{
smartlist_t *out = smartlist_new();
SMARTLIST_FOREACH(s, directory_token_t *, t,
if (t->tp == K_ACCEPT || t->tp == K_ACCEPT6 ||
t->tp == K_REJECT || t->tp == K_REJECT6)
smartlist_add(out,t));
return out;
}
/** Helper function for router_get_hash_impl: given s,
* s_len, start_str, end_str, and end_c with the
* same semantics as in that function, set *start_out (inclusive) and
* *end_out (exclusive) to the boundaries of the string to be hashed.
*
* Return 0 on success and -1 on failure.
*/
static int
router_get_hash_impl_helper(const char *s, size_t s_len,
const char *start_str,
const char *end_str, char end_c,
int log_severity,
const char **start_out, const char **end_out)
{
const char *start, *end;
start = tor_memstr(s, s_len, start_str);
if (!start) {
log_fn(log_severity,LD_DIR,
"couldn't find start of hashed material \"%s\"",start_str);
return -1;
}
if (start != s && *(start-1) != '\n') {
log_fn(log_severity,LD_DIR,
"first occurrence of \"%s\" is not at the start of a line",
start_str);
return -1;
}
end = tor_memstr(start+strlen(start_str),
s_len - (start-s) - strlen(start_str), end_str);
if (!end) {
log_fn(log_severity,LD_DIR,
"couldn't find end of hashed material \"%s\"",end_str);
return -1;
}
end = memchr(end+strlen(end_str), end_c, s_len - (end-s) - strlen(end_str));
if (!end) {
log_fn(log_severity,LD_DIR,
"couldn't find EOL");
return -1;
}
++end;
*start_out = start;
*end_out = end;
return 0;
}
/** Compute the digest of the substring of s taken from the first
* occurrence of start_str through the first instance of end_c after the
* first subsequent occurrence of end_str; store the 20-byte or 32-byte
* result in digest; return 0 on success.
*
* If no such substring exists, return -1.
*/
static int
router_get_hash_impl(const char *s, size_t s_len, char *digest,
const char *start_str,
const char *end_str, char end_c,
digest_algorithm_t alg)
{
const char *start=NULL, *end=NULL;
if (router_get_hash_impl_helper(s,s_len,start_str,end_str,end_c,LOG_WARN,
&start,&end)<0)
return -1;
return router_compute_hash_final(digest, start, end-start, alg);
}
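/* Illustrative example (not compiled): computing the SHA1 digest of a router
 * descriptor body with router_get_hash_impl(). The start/end strings below
 * are the ones used for the signed portion of a router descriptor elsewhere
 * in this file; treat the exact strings as an example, not a specification.
 *
 *   char digest[DIGEST_LEN];
 *   if (router_get_hash_impl(s, strlen(s), digest,
 *                            "router ", "\nrouter-signature", '\n',
 *                            DIGEST_SHA1) < 0)
 *     return -1;
 */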
/** Compute the digest of the len-byte directory object at
* start, using alg. Store the result in digest, which
* must be long enough to hold it. */
MOCK_IMPL(STATIC int,
router_compute_hash_final,(char *digest,
const char *start, size_t len,
digest_algorithm_t alg))
{
if (alg == DIGEST_SHA1) {
if (crypto_digest(digest, start, len) < 0) {
log_warn(LD_BUG,"couldn't compute digest");
return -1;
}
} else {
if (crypto_digest256(digest, start, len, alg) < 0) {
log_warn(LD_BUG,"couldn't compute digest");
return -1;
}
}
return 0;
}
/** As router_get_hash_impl, but compute all hashes. */
static int
router_get_hashes_impl(const char *s, size_t s_len, common_digests_t *digests,
const char *start_str,
const char *end_str, char end_c)
{
const char *start=NULL, *end=NULL;
if (router_get_hash_impl_helper(s,s_len,start_str,end_str,end_c,LOG_WARN,
&start,&end)<0)
return -1;
if (crypto_common_digests(digests, start, end-start)) {
log_warn(LD_BUG,"couldn't compute digests");
return -1;
}
return 0;
}
/** Assuming that s starts with a microdesc, return the start of the
* *NEXT* one. Return NULL on "not found." */
static const char *
find_start_of_next_microdesc(const char *s, const char *eos)
{
int started_with_annotations;
s = eat_whitespace_eos(s, eos);
if (!s)
return NULL;
#define CHECK_LENGTH() STMT_BEGIN \
if (eos - s < 32) \
return NULL; \
STMT_END
#define NEXT_LINE() STMT_BEGIN \
s = memchr(s, '\n', eos-s); \
if (!s || eos - s <= 1) \
return NULL; \
s++; \
STMT_END
CHECK_LENGTH();
started_with_annotations = (*s == '@');
if (started_with_annotations) {
/* Start by advancing to the first non-annotation line. */
while (*s == '@')
NEXT_LINE();
}
CHECK_LENGTH();
/* Now we should be pointed at an onion-key line. If we are, then skip
* it. */
if (!strcmpstart(s, "onion-key"))
NEXT_LINE();
/* Okay, now we're pointed at the first line of the microdescriptor which is
not an annotation or onion-key. The next line that _is_ an annotation or
onion-key is the start of the next microdescriptor. */
while (eos - s > 32) {
if (*s == '@' || !strcmpstart(s, "onion-key"))
return s;
NEXT_LINE();
}
return NULL;
#undef CHECK_LENGTH
#undef NEXT_LINE
}
/** Parse as many microdescriptors as are found from the string starting at
* s and ending at eos. If allow_annotations is set, read any
* annotations we recognize and ignore ones we don't.
*
* If where isn't SAVED_IN_CACHE, make a local copy of each
* descriptor in the body field of each microdesc_t.
*
* Return all newly parsed microdescriptors in a newly allocated
* smartlist_t. If invalid_digests_out is provided, add a SHA256
* microdesc digest to it for every microdesc that we found to be badly
* formed. (This may cause duplicates.) */
smartlist_t *
microdescs_parse_from_string(const char *s, const char *eos,
int allow_annotations,
saved_location_t where,
smartlist_t *invalid_digests_out)
{
smartlist_t *tokens;
smartlist_t *result;
microdesc_t *md = NULL;
memarea_t *area;
const char *start = s;
const char *start_of_next_microdesc;
int flags = allow_annotations ? TS_ANNOTATIONS_OK : 0;
const int copy_body = (where != SAVED_IN_CACHE);
directory_token_t *tok;
if (!eos)
eos = s + strlen(s);
s = eat_whitespace_eos(s, eos);
area = memarea_new();
result = smartlist_new();
tokens = smartlist_new();
while (s < eos) {
int okay = 0;
start_of_next_microdesc = find_start_of_next_microdesc(s, eos);
if (!start_of_next_microdesc)
start_of_next_microdesc = eos;
md = tor_malloc_zero(sizeof(microdesc_t));
{
const char *cp = tor_memstr(s, start_of_next_microdesc-s,
"onion-key");
const int no_onion_key = (cp == NULL);
if (no_onion_key) {
cp = s; /* So that we have *some* junk to put in the body */
}
md->bodylen = start_of_next_microdesc - cp;
md->saved_location = where;
if (copy_body)
md->body = tor_memdup_nulterm(cp, md->bodylen);
else
md->body = (char*)cp;
md->off = cp - start;
crypto_digest256(md->digest, md->body, md->bodylen, DIGEST_SHA256);
if (no_onion_key) {
log_fn(LOG_PROTOCOL_WARN, LD_DIR, "Malformed or truncated descriptor");
goto next;
}
}
if (tokenize_string(area, s, start_of_next_microdesc, tokens,
microdesc_token_table, flags)) {
log_warn(LD_DIR, "Unparseable microdescriptor");
goto next;
}
if ((tok = find_opt_by_keyword(tokens, A_LAST_LISTED))) {
if (parse_iso_time(tok->args[0], &md->last_listed)) {
log_warn(LD_DIR, "Bad last-listed time in microdescriptor");
goto next;
}
}
tok = find_by_keyword(tokens, K_ONION_KEY);
if (!crypto_pk_public_exponent_ok(tok->key)) {
log_warn(LD_DIR,
"Relay's onion key had invalid exponent.");
goto next;
}
md->onion_pkey = tok->key;
tok->key = NULL;
if ((tok = find_opt_by_keyword(tokens, K_ONION_KEY_NTOR))) {
curve25519_public_key_t k;
tor_assert(tok->n_args >= 1);
if (curve25519_public_from_base64(&k, tok->args[0]) < 0) {
log_warn(LD_DIR, "Bogus ntor-onion-key in microdesc");
goto next;
}
md->onion_curve25519_pkey =
tor_memdup(&k, sizeof(curve25519_public_key_t));
}
smartlist_t *id_lines = find_all_by_keyword(tokens, K_ID);
if (id_lines) {
SMARTLIST_FOREACH_BEGIN(id_lines, directory_token_t *, t) {
tor_assert(t->n_args >= 2);
if (!strcmp(t->args[0], "ed25519")) {
if (md->ed25519_identity_pkey) {
log_warn(LD_DIR, "Extra ed25519 key in microdesc");
smartlist_free(id_lines);
goto next;
}
ed25519_public_key_t k;
if (ed25519_public_from_base64(&k, t->args[1])<0) {
log_warn(LD_DIR, "Bogus ed25519 key in microdesc");
smartlist_free(id_lines);
goto next;
}
md->ed25519_identity_pkey = tor_memdup(&k, sizeof(k));
}
} SMARTLIST_FOREACH_END(t);
smartlist_free(id_lines);
}
{
smartlist_t *a_lines = find_all_by_keyword(tokens, K_A);
if (a_lines) {
find_single_ipv6_orport(a_lines, &md->ipv6_addr, &md->ipv6_orport);
smartlist_free(a_lines);
}
}
if ((tok = find_opt_by_keyword(tokens, K_FAMILY))) {
int i;
md->family = smartlist_new();
for (i=0;i<tok->n_args;++i) {
if (!is_legal_nickname_or_hexdigest(tok->args[i])) {
log_warn(LD_DIR, "Illegal nickname %s in family line",
escaped(tok->args[i]));
goto next;
}
smartlist_add_strdup(md->family, tok->args[i]);
}
}
if ((tok = find_opt_by_keyword(tokens, K_P))) {
md->exit_policy = parse_short_policy(tok->args[0]);
}
if ((tok = find_opt_by_keyword(tokens, K_P6))) {
md->ipv6_exit_policy = parse_short_policy(tok->args[0]);
}
smartlist_add(result, md);
okay = 1;
md = NULL;
next:
if (! okay && invalid_digests_out) {
smartlist_add(invalid_digests_out,
tor_memdup(md->digest, DIGEST256_LEN));
}
microdesc_free(md);
md = NULL;
SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t));
memarea_clear(area);
smartlist_clear(tokens);
s = start_of_next_microdesc;
}
SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t));
memarea_drop_all(area);
smartlist_free(tokens);
return result;
}
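/* Illustrative example (not compiled): parsing microdescriptors fetched from
 * a directory and collecting digests of the malformed ones. Variable names
 * are placeholders; SAVED_NOWHERE stands in for any saved_location_t other
 * than SAVED_IN_CACHE, so each microdesc gets its own copy of the body.
 *
 *   smartlist_t *invalid = smartlist_new();
 *   smartlist_t *mds = microdescs_parse_from_string(body, NULL, 1,
 *                                                   SAVED_NOWHERE, invalid);
 *   // mds holds one microdesc_t per well-formed entry; each element of
 *   // invalid is a 32-byte (DIGEST256_LEN) digest of a rejected entry.
 */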
/** Extract a Tor version from a platform line from a router
* descriptor, and place the result in router_version.
*
* Return 1 on success, -1 on parsing failure, and 0 if the
* platform line does not indicate some version of Tor.
*
* If strict is non-zero, finding any weird version components
* (like negative numbers) counts as a parsing failure.
*/
int
tor_version_parse_platform(const char *platform,
tor_version_t *router_version,
int strict)
{
char tmp[128];
char *s, *s2, *start;
if (strcmpstart(platform,"Tor ")) /* nonstandard Tor; say 0. */
return 0;
start = (char *)eat_whitespace(platform+3);
if (!*start) return -1;
s = (char *)find_whitespace(start); /* also finds '\0', which is fine */
s2 = (char*)eat_whitespace(s);
if (!strcmpstart(s2, "(r") || !strcmpstart(s2, "(git-"))
s = (char*)find_whitespace(s2);
if ((size_t)(s-start+1) >= sizeof(tmp)) /* too big, no */
return -1;
strlcpy(tmp, start, s-start+1);
if (tor_version_parse(tmp, router_version)<0) {
log_info(LD_DIR,"Router version '%s' unparseable.",tmp);
return -1;
}
if (strict) {
if (router_version->major < 0 ||
router_version->minor < 0 ||
router_version->micro < 0 ||
router_version->patchlevel < 0 ||
router_version->svn_revision < 0) {
return -1;
}
}
return 1;
}
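/* Illustrative example (not compiled):
 *
 *   tor_version_t ver;
 *   int r = tor_version_parse_platform("Tor 0.2.9.10 on Linux", &ver, 1);
 *   // r == 1 and ver describes 0.2.9.10; r == 0 for a platform string that
 *   // doesn't start with "Tor "; r == -1 if the version itself is
 *   // unparseable (or, with strict set, has weird components).
 */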
/** Parse the Tor version of the platform string platform,
* and compare it to the version in cutoff. Return 1 if
* the router is at least as new as the cutoff, else return 0.
*/
int
tor_version_as_new_as(const char *platform, const char *cutoff)
{
tor_version_t cutoff_version, router_version;
int r;
tor_assert(platform);
if (tor_version_parse(cutoff, &cutoff_version)<0) {
log_warn(LD_BUG,"cutoff version '%s' unparseable.",cutoff);
return 0;
}
r = tor_version_parse_platform(platform, &router_version, 0);
if (r == 0) {
/* nonstandard Tor; be safe and say yes */
return 1;
} else if (r < 0) {
/* unparseable version; be safe and say yes. */
return 1;
}
/* Here's why we don't need to do any special handling for svn revisions:
* - If neither has an svn revision, we're fine.
* - If the router doesn't have an svn revision, we can't assume that it
* is "at least" any svn revision, so we need to return 0.
* - If the target version doesn't have an svn revision, any svn revision
* (or none at all) is good enough, so return 1.
* - If both target and router have an svn revision, we compare them.
*/
return tor_version_compare(&router_version, &cutoff_version) >= 0;
}
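/* Illustrative example (not compiled): "ri->platform" is a placeholder for a
 * relay's platform line.
 *
 *   if (tor_version_as_new_as(ri->platform, "0.2.4.18-rc")) {
 *     // The relay claims a Tor version at least as new as 0.2.4.18-rc.
 *   }
 */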
/** Parse a tor version from s, and store the result in out.
* Return 0 on success, -1 on failure. */
int
tor_version_parse(const char *s, tor_version_t *out)
{
char *eos=NULL;
const char *cp=NULL;
int ok = 1;
/* Format is:
* "Tor " ? NUM dot NUM [ dot NUM [ ( pre | rc | dot ) NUM ] ] [ - tag ]
*/
tor_assert(s);
tor_assert(out);
memset(out, 0, sizeof(tor_version_t));
out->status = VER_RELEASE;
if (!strcasecmpstart(s, "Tor "))
s += 4;
cp = s;
#define NUMBER(m) \
do { \
if (!cp || *cp < '0' || *cp > '9') \
return -1; \
out->m = (int)tor_parse_uint64(cp, 10, 0, INT32_MAX, &ok, &eos); \
if (!ok) \
return -1; \
if (!eos || eos == cp) \
return -1; \
cp = eos; \
} while (0)
#define DOT() \
do { \
if (*cp != '.') \
return -1; \
++cp; \
} while (0)
NUMBER(major);
DOT();
NUMBER(minor);
if (*cp == 0)
return 0;
else if (*cp == '-')
goto status_tag;
DOT();
NUMBER(micro);
/* Get status */
if (*cp == 0) {
return 0;
} else if (*cp == '.') {
++cp;
} else if (*cp == '-') {
goto status_tag;
} else if (0==strncmp(cp, "pre", 3)) {
out->status = VER_PRE;
cp += 3;
} else if (0==strncmp(cp, "rc", 2)) {
out->status = VER_RC;
cp += 2;
} else {
return -1;
}
NUMBER(patchlevel);
status_tag:
/* Get status tag. */
if (*cp == '-' || *cp == '.')
++cp;
eos = (char*) find_whitespace(cp);
if (eos-cp >= (int)sizeof(out->status_tag))
strlcpy(out->status_tag, cp, sizeof(out->status_tag));
else {
memcpy(out->status_tag, cp, eos-cp);
out->status_tag[eos-cp] = 0;
}
cp = eat_whitespace(eos);
if (!strcmpstart(cp, "(r")) {
cp += 2;
out->svn_revision = (int) strtol(cp,&eos,10);
} else if (!strcmpstart(cp, "(git-")) {
char *close_paren = strchr(cp, ')');
int hexlen;
char digest[DIGEST_LEN];
if (! close_paren)
return -1;
cp += 5;
if (close_paren-cp > HEX_DIGEST_LEN)
return -1;
hexlen = (int)(close_paren-cp);
memwipe(digest, 0, sizeof(digest));
if ( hexlen == 0 || (hexlen % 2) == 1)
return -1;
if (base16_decode(digest, hexlen/2, cp, hexlen) != hexlen/2)
return -1;
memcpy(out->git_tag, digest, hexlen/2);
out->git_tag_len = hexlen/2;
}
return 0;
#undef NUMBER
#undef DOT
}
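/* Illustrative examples (not compiled) of strings tor_version_parse() accepts,
 * following the grammar in the comment above:
 *
 *   "0.2.9.10"             -> major 0, minor 2, micro 9, patchlevel 10
 *   "0.3.0.1-alpha"        -> as above, with status tag "alpha"
 *   "0.1.2.4-alpha-dev"    -> status tag "alpha-dev"
 *   "Tor 0.2.8.9 (git-a0df013ea241b026)" -> git tag stored in binary form
 */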
/** Compare two tor versions; return <0 if a < b, 0 if a == b, and >0 if
* a > b. */
int
tor_version_compare(tor_version_t *a, tor_version_t *b)
{
int i;
tor_assert(a);
tor_assert(b);
/* We take this approach to comparison to ensure the same (bogus!) behavior
* on all inputs as we would have seen before bug #21278 was fixed. The
* only important difference here is that this method doesn't cause
* a signed integer underflow.
*/
#define CMP(field) do { \
unsigned aval = (unsigned) a->field; \
unsigned bval = (unsigned) b->field; \
int result = (int) (aval - bval); \
if (result < 0) \
return -1; \
else if (result > 0) \
return 1; \
} while (0)
CMP(major);
CMP(minor);
CMP(micro);
CMP(status);
CMP(patchlevel);
if ((i = strcmp(a->status_tag, b->status_tag)))
return i;
CMP(svn_revision);
CMP(git_tag_len);
if (a->git_tag_len)
return fast_memcmp(a->git_tag, b->git_tag, a->git_tag_len);
else
return 0;
#undef CMP
}
/** Return true iff versions a and b belong to the same series.
*/
int
tor_version_same_series(tor_version_t *a, tor_version_t *b)
{
tor_assert(a);
tor_assert(b);
return ((a->major == b->major) &&
(a->minor == b->minor) &&
(a->micro == b->micro));
}
/** Helper: Given pointers to two strings describing tor versions, return -1
* if _a precedes _b, 1 if _b precedes _a, and 0 if they are equivalent.
* Used to sort a list of versions. */
static int
compare_tor_version_str_ptr_(const void **_a, const void **_b)
{
const char *a = *_a, *b = *_b;
int ca, cb;
tor_version_t va, vb;
ca = tor_version_parse(a, &va);
cb = tor_version_parse(b, &vb);
/* If they both parse, compare them. */
if (!ca && !cb)
return tor_version_compare(&va,&vb);
/* If one parses, it comes first. */
if (!ca && cb)
return -1;
if (ca && !cb)
return 1;
/* If neither parses, compare strings. Also, the directory server admin
** needs to be smacked upside the head. But Tor is tolerant and gentle. */
return strcmp(a,b);
}
/** Sort a list of string-representations of versions in ascending order. */
void
sort_version_list(smartlist_t *versions, int remove_duplicates)
{
smartlist_sort(versions, compare_tor_version_str_ptr_);
if (remove_duplicates)
smartlist_uniq(versions, compare_tor_version_str_ptr_, tor_free_);
}
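/* Illustrative example (not compiled): variable names are placeholders.
 *
 *   smartlist_t *v = smartlist_new();
 *   smartlist_add_strdup(v, "0.3.0.1-alpha");
 *   smartlist_add_strdup(v, "0.2.9.10");
 *   smartlist_add_strdup(v, "0.2.9.10");
 *   sort_version_list(v, 1);
 *   // v is now { "0.2.9.10", "0.3.0.1-alpha" }: ascending order, with the
 *   // duplicate entry freed by smartlist_uniq().
 */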
/** Parse and validate the ASCII-encoded v2 descriptor in desc,
* write the parsed descriptor to the newly allocated *parsed_out, the
* binary descriptor ID of length DIGEST_LEN to desc_id_out, the
* encrypted introduction points to the newly allocated
* *intro_points_encrypted_out, their encrypted size to
* *intro_points_encrypted_size_out, the size of the encoded descriptor
* to *encoded_size_out, and a pointer to the possibly next
* descriptor to *next_out; return 0 for success (including validation)
* and -1 for failure.
*
* If as_hsdir is 1, we're parsing this as an HSDir, and we should
* be strict about time formats.
*/
int
rend_parse_v2_service_descriptor(rend_service_descriptor_t **parsed_out,
char *desc_id_out,
char **intro_points_encrypted_out,
size_t *intro_points_encrypted_size_out,
size_t *encoded_size_out,
const char **next_out, const char *desc,
int as_hsdir)
{
rend_service_descriptor_t *result =
tor_malloc_zero(sizeof(rend_service_descriptor_t));
char desc_hash[DIGEST_LEN];
const char *eos;
smartlist_t *tokens = smartlist_new();
directory_token_t *tok;
char secret_id_part[DIGEST_LEN];
int i, version, num_ok=1;
smartlist_t *versions;
char public_key_hash[DIGEST_LEN];
char test_desc_id[DIGEST_LEN];
memarea_t *area = NULL;
const int strict_time_fmt = as_hsdir;
tor_assert(desc);
/* Check if desc starts correctly. */
if (strncmp(desc, "rendezvous-service-descriptor ",
strlen("rendezvous-service-descriptor "))) {
log_info(LD_REND, "Descriptor does not start correctly.");
goto err;
}
/* Compute descriptor hash for later validation. */
if (router_get_hash_impl(desc, strlen(desc), desc_hash,
"rendezvous-service-descriptor ",
"\nsignature", '\n', DIGEST_SHA1) < 0) {
log_warn(LD_REND, "Couldn't compute descriptor hash.");
goto err;
}
/* Determine end of string. */
eos = strstr(desc, "\nrendezvous-service-descriptor ");
if (!eos)
eos = desc + strlen(desc);
else
eos = eos + 1;
/* Check length. */
if (eos-desc > REND_DESC_MAX_SIZE) {
/* XXXX+ If we are parsing this descriptor as a server, this
* should be a protocol warning. */
log_warn(LD_REND, "Descriptor length is %d which exceeds "
"maximum rendezvous descriptor size of %d bytes.",
(int)(eos-desc), REND_DESC_MAX_SIZE);
goto err;
}
/* Tokenize descriptor. */
area = memarea_new();
if (tokenize_string(area, desc, eos, tokens, desc_token_table, 0)) {
log_warn(LD_REND, "Error tokenizing descriptor.");
goto err;
}
/* Set next to next descriptor, if available. */
*next_out = eos;
/* Set length of encoded descriptor. */
*encoded_size_out = eos - desc;
/* Check min allowed length of token list. */
if (smartlist_len(tokens) < 7) {
log_warn(LD_REND, "Impossibly short descriptor.");
goto err;
}
/* Parse base32-encoded descriptor ID. */
tok = find_by_keyword(tokens, R_RENDEZVOUS_SERVICE_DESCRIPTOR);
tor_assert(tok == smartlist_get(tokens, 0));
tor_assert(tok->n_args == 1);
if (!rend_valid_descriptor_id(tok->args[0])) {
log_warn(LD_REND, "Invalid descriptor ID: '%s'", tok->args[0]);
goto err;
}
if (base32_decode(desc_id_out, DIGEST_LEN,
tok->args[0], REND_DESC_ID_V2_LEN_BASE32) < 0) {
log_warn(LD_REND, "Descriptor ID contains illegal characters: %s",
tok->args[0]);
goto err;
}
/* Parse descriptor version. */
tok = find_by_keyword(tokens, R_VERSION);
tor_assert(tok->n_args == 1);
result->version =
(int) tor_parse_long(tok->args[0], 10, 0, INT_MAX, &num_ok, NULL);
if (result->version != 2 || !num_ok) {
/* If it's <2, it shouldn't be under this format. If the number
* is greater than 2, we bumped it because we broke backward
* compatibility. See how version numbers in our other formats
* work. */
log_warn(LD_REND, "Unrecognized descriptor version: %s",
escaped(tok->args[0]));
goto err;
}
/* Parse public key. */
tok = find_by_keyword(tokens, R_PERMANENT_KEY);
result->pk = tok->key;
tok->key = NULL; /* Prevent free */
/* Parse secret ID part. */
tok = find_by_keyword(tokens, R_SECRET_ID_PART);
tor_assert(tok->n_args == 1);
if (strlen(tok->args[0]) != REND_SECRET_ID_PART_LEN_BASE32 ||
strspn(tok->args[0], BASE32_CHARS) != REND_SECRET_ID_PART_LEN_BASE32) {
log_warn(LD_REND, "Invalid secret ID part: '%s'", tok->args[0]);
goto err;
}
if (base32_decode(secret_id_part, DIGEST_LEN, tok->args[0], 32) < 0) {
log_warn(LD_REND, "Secret ID part contains illegal characters: %s",
tok->args[0]);
goto err;
}
/* Parse publication time -- up-to-date check is done when storing the
* descriptor. */
tok = find_by_keyword(tokens, R_PUBLICATION_TIME);
tor_assert(tok->n_args == 1);
if (parse_iso_time_(tok->args[0], &result->timestamp,
strict_time_fmt, 0) < 0) {
log_warn(LD_REND, "Invalid publication time: '%s'", tok->args[0]);
goto err;
}
/* Parse protocol versions. */
tok = find_by_keyword(tokens, R_PROTOCOL_VERSIONS);
tor_assert(tok->n_args == 1);
versions = smartlist_new();
smartlist_split_string(versions, tok->args[0], ",",
SPLIT_SKIP_SPACE|SPLIT_IGNORE_BLANK, 0);
for (i = 0; i < smartlist_len(versions); i++) {
version = (int) tor_parse_long(smartlist_get(versions, i),
10, 0, INT_MAX, &num_ok, NULL);
if (!num_ok) /* It's a string; let's ignore it. */
continue;
if (version >= REND_PROTOCOL_VERSION_BITMASK_WIDTH)
/* Avoid undefined left-shift behaviour. */
continue;
result->protocols |= 1 << version;
}
SMARTLIST_FOREACH(versions, char *, cp, tor_free(cp));
smartlist_free(versions);
/* Parse encrypted introduction points. Don't verify. */
tok = find_opt_by_keyword(tokens, R_INTRODUCTION_POINTS);
if (tok) {
if (strcmp(tok->object_type, "MESSAGE")) {
log_warn(LD_DIR, "Bad object type: introduction points should be of "
"type MESSAGE");
goto err;
}
*intro_points_encrypted_out = tor_memdup(tok->object_body,
tok->object_size);
*intro_points_encrypted_size_out = tok->object_size;
} else {
*intro_points_encrypted_out = NULL;
*intro_points_encrypted_size_out = 0;
}
/* Parse and verify signature. */
tok = find_by_keyword(tokens, R_SIGNATURE);
if (check_signature_token(desc_hash, DIGEST_LEN, tok, result->pk, 0,
"v2 rendezvous service descriptor") < 0)
goto err;
/* Verify that descriptor ID belongs to public key and secret ID part. */
if (crypto_pk_get_digest(result->pk, public_key_hash) < 0) {
log_warn(LD_REND, "Unable to compute rend descriptor public key digest");
goto err;
}
rend_get_descriptor_id_bytes(test_desc_id, public_key_hash,
secret_id_part);
if (tor_memneq(desc_id_out, test_desc_id, DIGEST_LEN)) {
log_warn(LD_REND, "Parsed descriptor ID does not match "
"computed descriptor ID.");
goto err;
}
goto done;
err:
rend_service_descriptor_free(result);
result = NULL;
done:
if (tokens) {
SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t));
smartlist_free(tokens);
}
if (area)
memarea_drop_all(area);
*parsed_out = result;
if (result)
return 0;
return -1;
}
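/* Illustrative sketch (not compiled): the overall shape of a v2 rendezvous
 * service descriptor as consumed by rend_parse_v2_service_descriptor(), with
 * placeholder values; see rend-spec.txt for the authoritative format.
 *
 *   rendezvous-service-descriptor <base32 descriptor ID>
 *   version 2
 *   permanent-key
 *   -----BEGIN RSA PUBLIC KEY----- ... -----END RSA PUBLIC KEY-----
 *   secret-id-part <base32 secret ID part>
 *   publication-time 2017-01-01 12:00:00
 *   protocol-versions 2,3
 *   introduction-points
 *   -----BEGIN MESSAGE----- ... -----END MESSAGE-----
 *   signature
 *   -----BEGIN SIGNATURE----- ... -----END SIGNATURE-----
 */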
/** Decrypt the encrypted introduction points in ipos_encrypted of
* length ipos_encrypted_size using descriptor_cookie and
* write the result to a newly allocated string that is pointed to by
* ipos_decrypted and its length to ipos_decrypted_size.
* Return 0 if decryption was successful and -1 otherwise. */
int
rend_decrypt_introduction_points(char **ipos_decrypted,
size_t *ipos_decrypted_size,
const char *descriptor_cookie,
const char *ipos_encrypted,
size_t ipos_encrypted_size)
{
tor_assert(ipos_encrypted);
tor_assert(descriptor_cookie);
if (ipos_encrypted_size < 2) {
log_warn(LD_REND, "Size of encrypted introduction points is too "
"small.");
return -1;
}
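/* Layout handled below, as implied by the offsets this function uses (see
 * rend-spec.txt for the authoritative description):
 *
 *   basic auth:   [auth type (1 byte)] [number of client blocks (1 byte)]
 *                 [client entries] [IV (CIPHER_IV_LEN)] [ciphertext]
 *   stealth auth: [auth type (1 byte)] [IV (CIPHER_IV_LEN)] [ciphertext]
 */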
if (ipos_encrypted[0] == (int)REND_BASIC_AUTH) {
char iv[CIPHER_IV_LEN], client_id[REND_BASIC_AUTH_CLIENT_ID_LEN],
session_key[CIPHER_KEY_LEN], *dec;
int declen, client_blocks;
size_t pos = 0, len, client_entries_len;
crypto_digest_t *digest;
crypto_cipher_t *cipher;
client_blocks = (int) ipos_encrypted[1];
client_entries_len = client_blocks * REND_BASIC_AUTH_CLIENT_MULTIPLE *
REND_BASIC_AUTH_CLIENT_ENTRY_LEN;
if (ipos_encrypted_size < 2 + client_entries_len + CIPHER_IV_LEN + 1) {
log_warn(LD_REND, "Size of encrypted introduction points is too "
"small.");
return -1;
}
memcpy(iv, ipos_encrypted + 2 + client_entries_len, CIPHER_IV_LEN);
digest = crypto_digest_new();
crypto_digest_add_bytes(digest, descriptor_cookie, REND_DESC_COOKIE_LEN);
crypto_digest_add_bytes(digest, iv, CIPHER_IV_LEN);
crypto_digest_get_digest(digest, client_id,
REND_BASIC_AUTH_CLIENT_ID_LEN);
crypto_digest_free(digest);
for (pos = 2; pos < 2 + client_entries_len;
pos += REND_BASIC_AUTH_CLIENT_ENTRY_LEN) {
if (tor_memeq(ipos_encrypted + pos, client_id,
REND_BASIC_AUTH_CLIENT_ID_LEN)) {
/* Attempt to decrypt introduction points. */
cipher = crypto_cipher_new(descriptor_cookie);
if (crypto_cipher_decrypt(cipher, session_key, ipos_encrypted
+ pos + REND_BASIC_AUTH_CLIENT_ID_LEN,
CIPHER_KEY_LEN) < 0) {
log_warn(LD_REND, "Could not decrypt session key for client.");
crypto_cipher_free(cipher);
return -1;
}
crypto_cipher_free(cipher);
len = ipos_encrypted_size - 2 - client_entries_len - CIPHER_IV_LEN;
dec = tor_malloc_zero(len + 1);
declen = crypto_cipher_decrypt_with_iv(session_key, dec, len,
ipos_encrypted + 2 + client_entries_len,
ipos_encrypted_size - 2 - client_entries_len);
if (declen < 0) {
log_warn(LD_REND, "Could not decrypt introduction point string.");
tor_free(dec);
return -1;
}
if (fast_memcmpstart(dec, declen, "introduction-point ")) {
log_warn(LD_REND, "Decrypted introduction points don't "
"look like we could parse them.");
tor_free(dec);
continue;
}
*ipos_decrypted = dec;
*ipos_decrypted_size = declen;
return 0;
}
}
log_warn(LD_REND, "Could not decrypt introduction points. Please "
"check your authorization for this service!");
return -1;
} else if (ipos_encrypted[0] == (int)REND_STEALTH_AUTH) {
char *dec;
int declen;
if (ipos_encrypted_size < CIPHER_IV_LEN + 2) {
log_warn(LD_REND, "Size of encrypted introduction points is too "
"small.");
return -1;
}
dec = tor_malloc_zero(ipos_encrypted_size - CIPHER_IV_LEN - 1 + 1);
declen = crypto_cipher_decrypt_with_iv(descriptor_cookie, dec,
ipos_encrypted_size -
CIPHER_IV_LEN - 1,
ipos_encrypted + 1,
ipos_encrypted_size - 1);
if (declen < 0) {
log_warn(LD_REND, "Decrypting introduction points failed!");
tor_free(dec);
return -1;
}
*ipos_decrypted = dec;
*ipos_decrypted_size = declen;
return 0;
} else {
log_warn(LD_REND, "Unknown authorization type number: %d",
ipos_encrypted[0]);
return -1;
}
}
/** Parse the encoded introduction points in intro_points_encoded of
* length intro_points_encoded_size and write the result to the
* descriptor in parsed; return the number of successfully parsed
* introduction points or -1 in case of a failure. */
int
rend_parse_introduction_points(rend_service_descriptor_t *parsed,
const char *intro_points_encoded,
size_t intro_points_encoded_size)
{
const char *current_ipo, *end_of_intro_points;
smartlist_t *tokens = NULL;
directory_token_t *tok;
rend_intro_point_t *intro;
extend_info_t *info;
int result, num_ok=1;
memarea_t *area = NULL;
tor_assert(parsed);
/** Function may only be invoked once. */
tor_assert(!parsed->intro_nodes);
if (!intro_points_encoded || intro_points_encoded_size == 0) {
log_warn(LD_REND, "Empty or zero size introduction point list");
goto err;
}
/* Consider one intro point after the other. */
current_ipo = intro_points_encoded;
end_of_intro_points = intro_points_encoded + intro_points_encoded_size;
tokens = smartlist_new();
parsed->intro_nodes = smartlist_new();
area = memarea_new();
while (!fast_memcmpstart(current_ipo, end_of_intro_points-current_ipo,
"introduction-point ")) {
/* Determine end of string. */
const char *eos = tor_memstr(current_ipo, end_of_intro_points-current_ipo,
"\nintroduction-point ");
if (!eos)
eos = end_of_intro_points;
else
eos = eos+1;
tor_assert(eos <= intro_points_encoded+intro_points_encoded_size);
/* Free tokens and clear token list. */
SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t));
smartlist_clear(tokens);
memarea_clear(area);
/* Tokenize string. */
if (tokenize_string(area, current_ipo, eos, tokens, ipo_token_table, 0)) {
log_warn(LD_REND, "Error tokenizing introduction point");
goto err;
}
/* Advance to next introduction point, if available. */
current_ipo = eos;
/* Check minimum allowed length of introduction point. */
if (smartlist_len(tokens) < 5) {
log_warn(LD_REND, "Impossibly short introduction point.");
goto err;
}
/* Allocate new intro point and extend info. */
intro = tor_malloc_zero(sizeof(rend_intro_point_t));
info = intro->extend_info = tor_malloc_zero(sizeof(extend_info_t));
/* Parse identifier. */
tok = find_by_keyword(tokens, R_IPO_IDENTIFIER);
if (base32_decode(info->identity_digest, DIGEST_LEN,
tok->args[0], REND_INTRO_POINT_ID_LEN_BASE32) < 0) {
log_warn(LD_REND, "Identity digest contains illegal characters: %s",
tok->args[0]);
rend_intro_point_free(intro);
goto err;
}
/* Write identifier to nickname. */
info->nickname[0] = '$';
base16_encode(info->nickname + 1, sizeof(info->nickname) - 1,
info->identity_digest, DIGEST_LEN);
/* Parse IP address. */
tok = find_by_keyword(tokens, R_IPO_IP_ADDRESS);
if (tor_addr_parse(&info->addr, tok->args[0])<0) {
log_warn(LD_REND, "Could not parse introduction point address.");
rend_intro_point_free(intro);
goto err;
}
if (tor_addr_family(&info->addr) != AF_INET) {
log_warn(LD_REND, "Introduction point address was not ipv4.");
rend_intro_point_free(intro);
goto err;
}
/* Parse onion port. */
tok = find_by_keyword(tokens, R_IPO_ONION_PORT);
info->port = (uint16_t) tor_parse_long(tok->args[0],10,1,65535,
&num_ok,NULL);
if (!info->port || !num_ok) {
log_warn(LD_REND, "Introduction point onion port %s is invalid",
escaped(tok->args[0]));
rend_intro_point_free(intro);
goto err;
}
/* Parse onion key. */
tok = find_by_keyword(tokens, R_IPO_ONION_KEY);
if (!crypto_pk_public_exponent_ok(tok->key)) {
log_warn(LD_REND,
"Introduction point's onion key had invalid exponent.");
rend_intro_point_free(intro);
goto err;
}
info->onion_key = tok->key;
tok->key = NULL; /* Prevent free */
/* Parse service key. */
tok = find_by_keyword(tokens, R_IPO_SERVICE_KEY);
if (!crypto_pk_public_exponent_ok(tok->key)) {
log_warn(LD_REND,
"Introduction point key had invalid exponent.");
rend_intro_point_free(intro);
goto err;
}
intro->intro_key = tok->key;
tok->key = NULL; /* Prevent free */
/* Add extend info to list of introduction points. */
smartlist_add(parsed->intro_nodes, intro);
}
result = smartlist_len(parsed->intro_nodes);
goto done;
err:
result = -1;
done:
/* Free tokens and clear token list. */
if (tokens) {
SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t));
smartlist_free(tokens);
}
if (area)
memarea_drop_all(area);
return result;
}
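/* Illustrative sketch (not compiled): one encoded introduction point as
 * consumed by rend_parse_introduction_points(), with placeholder values;
 * keyword spellings follow rend-spec.txt and should be treated as an
 * example here.
 *
 *   introduction-point <base32 identity>
 *   ip-address 192.0.2.1
 *   onion-port 443
 *   onion-key
 *   -----BEGIN RSA PUBLIC KEY----- ... -----END RSA PUBLIC KEY-----
 *   service-key
 *   -----BEGIN RSA PUBLIC KEY----- ... -----END RSA PUBLIC KEY-----
 */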
/** Parse the content of a client_key file in ckstr and add
* rend_authorized_client_t's for each parsed client to
* parsed_clients. Return the number of parsed clients as result
* or -1 for failure. */
int
rend_parse_client_keys(strmap_t *parsed_clients, const char *ckstr)
{
int result = -1;
smartlist_t *tokens;
directory_token_t *tok;
const char *current_entry = NULL;
memarea_t *area = NULL;
char *err_msg = NULL;
if (!ckstr || strlen(ckstr) == 0)
return -1;
tokens = smartlist_new();
/* Begin parsing with first entry, skipping comments or whitespace at the
* beginning. */
area = memarea_new();
current_entry = eat_whitespace(ckstr);
while (!strcmpstart(current_entry, "client-name ")) {
rend_authorized_client_t *parsed_entry;
/* Determine end of string. */
const char *eos = strstr(current_entry, "\nclient-name ");
if (!eos)
eos = current_entry + strlen(current_entry);
else
eos = eos + 1;
/* Free tokens and clear token list. */
SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t));
smartlist_clear(tokens);
memarea_clear(area);
/* Tokenize string. */
if (tokenize_string(area, current_entry, eos, tokens,
client_keys_token_table, 0)) {
log_warn(LD_REND, "Error tokenizing client keys file.");
goto err;
}
/* Advance to next entry, if available. */
current_entry = eos;
/* Check minimum allowed length of token list. */
if (smartlist_len(tokens) < 2) {
log_warn(LD_REND, "Impossibly short client key entry.");
goto err;
}
/* Parse client name. */
tok = find_by_keyword(tokens, C_CLIENT_NAME);
tor_assert(tok == smartlist_get(tokens, 0));
tor_assert(tok->n_args == 1);
if (!rend_valid_client_name(tok->args[0])) {
log_warn(LD_CONFIG, "Illegal client name: %s. (Length must be "
"between 1 and %d, and valid characters are "
"[A-Za-z0-9+-_].)", tok->args[0], REND_CLIENTNAME_MAX_LEN);
goto err;
}
/* Check if client name is duplicate. */
if (strmap_get(parsed_clients, tok->args[0])) {
log_warn(LD_CONFIG, "HiddenServiceAuthorizeClient contains a "
"duplicate client name: '%s'. Ignoring.", tok->args[0]);
goto err;
}
parsed_entry = tor_malloc_zero(sizeof(rend_authorized_client_t));
parsed_entry->client_name = tor_strdup(tok->args[0]);
strmap_set(parsed_clients, parsed_entry->client_name, parsed_entry);
/* Parse client key. */
tok = find_opt_by_keyword(tokens, C_CLIENT_KEY);
if (tok) {
parsed_entry->client_key = tok->key;
tok->key = NULL; /* Prevent free */
}
/* Parse descriptor cookie. */
tok = find_by_keyword(tokens, C_DESCRIPTOR_COOKIE);
tor_assert(tok->n_args == 1);
if (rend_auth_decode_cookie(tok->args[0], parsed_entry->descriptor_cookie,
NULL, &err_msg) < 0) {
tor_assert(err_msg);
log_warn(LD_REND, "%s", err_msg);
tor_free(err_msg);
goto err;
}
}
result = strmap_size(parsed_clients);
goto done;
err:
result = -1;
done:
/* Free tokens and clear token list. */
SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t));
smartlist_free(tokens);
if (area)
memarea_drop_all(area);
return result;
}
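/* Illustrative sketch (not compiled): the shape of a client_keys file entry
 * as consumed by rend_parse_client_keys(), with placeholder values.
 * "client-name" starts each entry, "descriptor-cookie" is required, and
 * "client-key" is optional.
 *
 *   client-name alice
 *   descriptor-cookie <base64-encoded cookie>
 *   client-key
 *   -----BEGIN RSA PRIVATE KEY----- ... -----END RSA PRIVATE KEY-----
 */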
/** Called on startup; right now we just handle scanning the unparseable
* descriptor dumps, but hang anything else we might need to do in the
* future here as well.
*/
void
routerparse_init(void)
{
/*
* Check both if the sandbox is active and whether it's configured; no
* point in loading all that if we won't be able to use it after the
* sandbox becomes active.
*/
if (!(sandbox_is_active() || get_options()->Sandbox)) {
dump_desc_init();
}
}
/** Clean up all data structures used by routerparse.c at exit */
void
routerparse_free_all(void)
{
dump_desc_fifo_cleanup();
}