Module: Hyperion::Http::ResponseWriter

Defined in:
lib/hyperion/http/response_writer.rb,
ext/hyperion_http/response_writer.c

Overview

Direct-syscall response writer for plain-TCP kernel fds.

The C primitives are registered as singleton methods on this very module by `ext/hyperion_http/response_writer.c` (see `Init_hyperion_response_writer`). Surface from C:

ResponseWriter.available?            -> true | false
ResponseWriter.c_write_buffered(io, status, headers, body,
                                keep_alive, date_str) -> Integer
ResponseWriter.c_write_chunked(io, status, headers, body,
                               keep_alive, date_str)  -> Integer
ResponseWriter.c_write_buffered_via_ring(io, status, headers,
                                         body, keep_alive,
                                         date_str, ring_ptr)
                                        -> Integer
  Plan #2 seam: submits a send SQE via the io_uring crate instead
  of issuing writev directly. Falls back to c_write_buffered when
  the io_uring crate is not loaded (hyp_submit_send_fn == NULL
  after lazy dlsym attempt). ring_ptr is the HotpathRing raw
  pointer as an Integer.

Operators can flip the dispatcher off at runtime with `Hyperion::Http::ResponseWriter.c_writer_available = false` (test seam / A/B rollback). Mirrors the `Hyperion::ResponseWriter.page_cache_available = false` pattern (response_writer.rb:60-65).

Constant Summary collapse

WOULDBLOCK =

Ruby caller checks for this value and falls back to io.write when the kernel send buffer is full (EAGAIN).

WOULDBLOCK sentinel

Class Attribute Summary collapse

Class Method Summary collapse

Class Attribute Details

.c_writer_available=(value) ⇒ Object (writeonly)

Sets the attribute c_writer_available

Parameters:

  • value

    the value to set the attribute c_writer_available to.



33
34
35
# File 'lib/hyperion/http/response_writer.rb', line 33

# Operator/test seam: force the C dispatcher off (or back on) at runtime.
# Assigning false makes #c_writer_available? return false without probing;
# assigning nil re-arms the lazy probe in #c_writer_available?.
def c_writer_available=(value)
  @c_writer_available = value
end

Class Method Details

.available? ⇒ Boolean

Returns:

  • (Boolean)


65
66
67
68
# File 'ext/hyperion_http/response_writer.c', line 65

/*
 * ResponseWriter.available? -> true
 *
 * Compiled-in capability probe: if this C extension loaded at all, the
 * native writer surface exists, so the answer is unconditionally true.
 * Runtime opt-out goes through the Ruby-side `c_writer_available=` seam,
 * not this predicate.
 */
static VALUE c_response_writer_available_p(VALUE self) {
    (void)self;  /* module singleton receiver — unused */
    return Qtrue;
}

.c_write_buffered(io, rb_status, rb_headers, rb_body, rb_keep_alive, rb_date) ⇒ Object

Hyperion::Http::ResponseWriter.c_write_buffered(io, status, headers,

body, keep_alive,
date_str) -> Integer

Writes a complete HTTP/1.1 response (head + body) to the kernel fd underlying `io` in a single sendmsg/writev call. Validates header values for CR/LF injection and body chunks for type safety before issuing the syscall.

Returns total bytes written on success. Returns HYP_C_WRITE_WOULDBLOCK (-2) on EAGAIN — caller falls back to io.write (which parks the fiber / blocks the thread correctly). Raises rb_eArgError on CR/LF in header values. Raises rb_eTypeError on non-String body chunks. Raises SystemCallError on hard write failures.



165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
# File 'ext/hyperion_http/response_writer.c', line 165

/*
 * ResponseWriter.c_write_buffered(io, status, headers, body,
 *                                 keep_alive, date_str) -> Integer
 *
 * Writes a complete HTTP/1.1 response (head + body chunks) to the fd
 * behind `io` in a single hyp_writev_all call. Returns total bytes
 * written, or HYP_C_WRITE_WOULDBLOCK (-2) on EAGAIN so the Ruby caller
 * can fall back to io.write. Raises TypeError here on non-Hash headers,
 * non-Array body, or non-String chunks; CR/LF header validation happens
 * inside hyperion_build_response_head.
 */
static VALUE c_write_buffered(VALUE self, VALUE io, VALUE rb_status,
                              VALUE rb_headers, VALUE rb_body,
                              VALUE rb_keep_alive, VALUE rb_date) {
    (void)self;

    /* 1. Type checks up front — fail fast on bad shapes before any
     *    syscall. Header CR/LF validation and value coercion happen
     *    inside cbuild_response_head (build_head_each), so we don't
     *    duplicate them here. */
    Check_Type(rb_headers, T_HASH);
    Check_Type(rb_body, T_ARRAY);

    /* 2. Resolve fd from the Ruby IO object. rb_funcall can GC; do it
     *    before we take any raw C pointers into Ruby objects. */
    int fd = NUM2INT(rb_funcall(io, id_fileno, 0));

    /* 3. Body type check and byte-size sum.
     *    RARRAY_AREF is safe while rb_body is live on the C stack. */
    long body_size = 0;
    long body_len  = RARRAY_LEN(rb_body);
    for (long i = 0; i < body_len; i++) {
        VALUE chunk = RARRAY_AREF(rb_body, i);
        Check_Type(chunk, T_STRING);
        body_size += RSTRING_LEN(chunk);
    }

    /* 4. Build the response head.
     *    hyperion_build_response_head lives in parser.c and is exported
     *    via response_writer.h. The reason String comes from a pre-baked
     *    frozen-String table — zero allocation for the 23 common statuses;
     *    only unknown statuses fall back to k_reason_unknown.
     *    cbuild_response_head's build_head_each performs the CR/LF guard
     *    and rb_obj_as_string coercion on header values, matching the
     *    Ruby fallback's semantics exactly. */
    int status = NUM2INT(rb_status);
    VALUE rb_reason = hyp_lookup_reason(status);
    VALUE head = hyperion_build_response_head(
        rb_status, rb_reason, rb_headers,
        LL2NUM(body_size), rb_keep_alive, rb_date
    );

    /* 5. Assemble iovec: slot 0 = response head; slots 1..N = body chunks
     *    (capped at HYP_C_IOV_MAX-1). Bodies longer than HYP_C_IOV_MAX-1
     *    chunks are coalesced into a single buffer allocated here. */
    struct iovec iov[HYP_C_IOV_MAX];
    iov[0].iov_base = RSTRING_PTR(head);
    iov[0].iov_len  = (size_t)RSTRING_LEN(head);
    int iov_count = 1;

    /* Hold a reference so GC can't reap the coalesced buffer before
     * the syscall completes. Qnil means "not used". */
    VALUE coalesced = Qnil;

    if (body_len <= (long)(HYP_C_IOV_MAX - 1)) {
        /* Fast path: each chunk gets its own iov slot. The Array `rb_body`
         * is a GC root that pins all its elements for our call duration. */
        for (long i = 0; i < body_len; i++) {
            VALUE chunk = RARRAY_AREF(rb_body, i);
            iov[iov_count].iov_base = RSTRING_PTR(chunk);
            iov[iov_count].iov_len  = (size_t)RSTRING_LEN(chunk);
            iov_count++;
        }
    } else {
        /* Slow path: coalesce into one buffer to keep iov_count bounded.
         * This branch fires only for Array bodies with >= 8 chunks — rare
         * in practice. We accept the one-time allocation.
         * NOTE(review): rb_str_buf_new/append may GC; iov[0] still points
         * at head's buffer from before — assumes Strings don't move here
         * (no compaction mid-call) — confirm against GC.compact usage. */
        coalesced = rb_str_buf_new(body_size);
        for (long i = 0; i < body_len; i++)
            rb_str_buf_append(coalesced, RARRAY_AREF(rb_body, i));
        iov[1].iov_base = RSTRING_PTR(coalesced);
        iov[1].iov_len  = (size_t)RSTRING_LEN(coalesced);
        iov_count = 2;
    }

    ssize_t n = hyp_writev_all(fd, iov, iov_count);

    /* GC-safety: keep `head` and `coalesced` (when used) alive across
     * the syscall. -O2 can elide local Ruby Strings whose only use is
     * the RSTRING_PTR at iov assembly; MRI's conservative GC stack
     * scan would then miss them. RB_GC_GUARD is the project-standard
     * idiom (parser.c uses it 9 times for the same pattern). */
    RB_GC_GUARD(head);
    RB_GC_GUARD(coalesced);

    if (n == HYP_C_WRITE_WOULDBLOCK) return INT2NUM(HYP_C_WRITE_WOULDBLOCK);
    return SSIZET2NUM(n);
}

.c_write_buffered_via_ring(io, rb_status, rb_headers, rb_body, rb_keep_alive, rb_date, rb_ring_ptr) ⇒ Object

Hyperion::Http::ResponseWriter.c_write_buffered_via_ring(io, status,

 headers, body,
 keep_alive,
 date_str,
 ring_ptr)
-> Integer

Plan #2 — io_uring-owned variant of c_write_buffered. Submits a send SQE via the Rust hyperion_io_uring crate instead of issuing write/writev directly. `ring_ptr` is the HotpathRing raw pointer cast to an Integer by the Ruby caller (Connection layer).

Falls back to direct write (c_write_buffered) when the io_uring crate isn’t loaded (hyp_submit_send_fn == NULL after lazy-resolve attempt).

iov lifetime caveat: the kernel reads iov data AFTER submit_send returns. The iov array is allocated via xmalloc and intentionally NOT freed here — the Ruby head + body Strings stay alive via GC roots; the iov array itself leaks one entry per response under sustained load.

TODO(plan #2 task 2.5): replace xmalloc-leak with a per-conn iov arena that frees on send-CQE completion. Current behavior leaks one iov array per response under sustained load.



473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
# File 'ext/hyperion_http/response_writer.c', line 473

/*
 * ResponseWriter.c_write_buffered_via_ring(io, status, headers, body,
 *                                          keep_alive, date_str, ring_ptr)
 *     -> Integer
 *
 * Plan #2 — io_uring-owned variant of c_write_buffered: submits a send SQE
 * through the Rust hyperion_io_uring crate instead of issuing writev here.
 * `ring_ptr` is the HotpathRing raw pointer as an Integer. Falls back to
 * c_write_buffered when the crate's submit symbol cannot be resolved.
 *
 * Returns the speculative byte count (head + body); the actual count is
 * confirmed by the send CQE in the accept fiber. Raises TypeError on bad
 * header/body shapes and SystemCallError when submit_send fails.
 */
static VALUE c_write_buffered_via_ring(VALUE self, VALUE io, VALUE rb_status,
                                        VALUE rb_headers, VALUE rb_body,
                                        VALUE rb_keep_alive, VALUE rb_date,
                                        VALUE rb_ring_ptr) {
    /* Lazy-resolve the io_uring submit_send symbol on first call. After the
     * first successful resolve, hyp_submit_send_fn is non-NULL and this
     * branch is skipped on every subsequent call (~50 ns dlsym cost paid
     * once per process, not per request). */
    if (!hyp_submit_send_fn) {
        hyp_submit_send_fn =
            (int (*)(void *, int, const void *, unsigned int))
            dlsym(RTLD_DEFAULT, "hyperion_io_uring_hotpath_submit_send");
    }
    if (!hyp_submit_send_fn) {
        /* io_uring crate not loaded — fall back to direct write path. */
        return c_write_buffered(self, io, rb_status, rb_headers, rb_body,
                                rb_keep_alive, rb_date);
    }

    /* Fail fast on bad shapes BEFORE any side effects — matches
     * c_write_buffered's ordering (type checks precede fd resolution). */
    Check_Type(rb_headers, T_HASH);
    Check_Type(rb_body, T_ARRAY);

    /* Resolve fd before taking raw C pointers into Ruby objects (rb_funcall
     * may GC). */
    int fd = NUM2INT(rb_funcall(io, id_fileno, 0));

    /* Sum body bytes and type-check chunks. */
    long body_size = 0;
    long body_len  = RARRAY_LEN(rb_body);
    for (long i = 0; i < body_len; i++) {
        VALUE chunk = RARRAY_AREF(rb_body, i);
        Check_Type(chunk, T_STRING);
        body_size += RSTRING_LEN(chunk);
    }

    int status = NUM2INT(rb_status);
    VALUE rb_reason = hyp_lookup_reason(status);
    VALUE head = hyperion_build_response_head(
        rb_status, rb_reason, rb_headers,
        LL2NUM(body_size), rb_keep_alive, rb_date
    );

    /* Pin `head` (and the body Array) PAST this call's return: the kernel
     * reads the iov buffers AFTER submit_send returns, but a local VALUE
     * stops being a GC root once this function exits — RB_GC_GUARD alone
     * is not enough here. Stashing the pair on the connection's IO keeps
     * both reachable until the next response on this connection overwrites
     * the slot, by which time the prior send CQE has been processed by the
     * accept fiber.
     * TODO(plan #2 task 2.5): move this pinning into the per-conn iov
     * arena freed on send-CQE completion. */
    rb_ivar_set(io, rb_intern("@__hyp_ring_pending"),
                rb_assoc_new(head, rb_body));

    /* iov array via xmalloc (Ruby-tracked), intentionally NOT freed on the
     * success path — the kernel reads it after we return. One array leaks
     * per response until the task 2.5 arena lands. */
    long total_iov = 1 + body_len;
    struct iovec *iov = ALLOC_N(struct iovec, total_iov);
    iov[0].iov_base = RSTRING_PTR(head);
    iov[0].iov_len  = (size_t)RSTRING_LEN(head);
    for (long i = 0; i < body_len; i++) {
        VALUE chunk = RARRAY_AREF(rb_body, i);
        iov[i + 1].iov_base = RSTRING_PTR(chunk);
        iov[i + 1].iov_len  = (size_t)RSTRING_LEN(chunk);
    }

    void *ring_ptr = (void *)NUM2SIZET(rb_ring_ptr);
    int rc = hyp_submit_send_fn(ring_ptr, fd, iov, (unsigned int)total_iov);
    if (rc < 0) {
        xfree(iov);
        /* NOTE(review): rb_sys_fail reads errno — confirm the Rust shim
         * sets errno on failure; otherwise use rb_syserr_fail(-rc, ...). */
        rb_sys_fail("hotpath submit_send");
    }

    /* Keep head alive across the submit_send call itself (the ivar above
     * covers the post-return window). */
    RB_GC_GUARD(head);

    /* Return bytes-to-be-written (speculative; the actual byte count is
     * confirmed by the send CQE in the accept fiber — Task 2.5 wires
     * CQE feedback for metrics reconciliation). */
    return SIZET2NUM((size_t)RSTRING_LEN(head) + (size_t)body_size);
}

.c_write_chunked(io, rb_status, rb_headers, rb_body, rb_keep_alive, rb_date) ⇒ Object

Hyperion::Http::ResponseWriter.c_write_chunked(io, status, headers,

body, keep_alive,
date_str) -> Integer


393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
# File 'ext/hyperion_http/response_writer.c', line 393

/*
 * ResponseWriter.c_write_chunked(io, status, headers, body,
 *                                keep_alive, date_str) -> Integer
 *
 * Streams `body` (anything responding to #each, yielded chunks handled by
 * hyp_chunked_callback) as a chunked HTTP/1.1 response: head first, then
 * the chunks, then the "0\r\n\r\n" terminator. Returns total bytes
 * written, or HYP_C_WRITE_WOULDBLOCK (-2) if the head write would block.
 */
static VALUE c_write_chunked(VALUE self, VALUE io, VALUE rb_status,
                             VALUE rb_headers, VALUE rb_body,
                             VALUE rb_keep_alive, VALUE rb_date) {
    (void)self;
    Check_Type(rb_headers, T_HASH);

    int fd = NUM2INT(rb_funcall(io, id_fileno, 0));
    int status = NUM2INT(rb_status);
    VALUE rb_reason = hyp_lookup_reason(status);

    /* Build chunked head: emits transfer-encoding: chunked instead of
     * content-length; drops caller-supplied content-length and TE. */
    VALUE head = hyperion_build_response_head_chunked(
        rb_status, rb_reason, rb_headers, rb_keep_alive, rb_date
    );

    struct hyp_chunked_state st;
    memset(&st, 0, sizeof(st));
    st.fd = fd;

    /* Emit the head as a single syscall. */
    struct iovec head_iov[1];
    head_iov[0].iov_base = (void *)RSTRING_PTR(head);
    head_iov[0].iov_len  = (size_t)RSTRING_LEN(head);
    ssize_t n = hyp_writev_all(fd, head_iov, 1);
    if (n == HYP_C_WRITE_WOULDBLOCK) {
        /* NOTE(review): assumes hyp_writev_all reports WOULDBLOCK only
         * when zero bytes were sent, so the Ruby io.write fallback can
         * restart the response cleanly — confirm in hyp_writev_all. */
        RB_GC_GUARD(head);
        return INT2NUM(HYP_C_WRITE_WOULDBLOCK);
    }
    st.bytes_written += (size_t)RSTRING_LEN(head);

    /* Iterate body via rb_block_call. Ruby exceptions propagate
     * (the dispatcher's Connection#serve rescue handles teardown).
     * id_each cached at init. */
    rb_block_call(rb_body, id_each, 0, NULL,
                  hyp_chunked_callback, (VALUE)&st);

    /* Drain coalesce + emit terminator atomically when possible:
     * coalesce buffer has room → memcpy the terminator and drain
     * (single syscall ends the response). Otherwise drain first
     * then write the terminator separately. */
    static const unsigned char term[] = { '0','\r','\n','\r','\n' };
    if (st.buf_used + sizeof(term) <= sizeof(st.buf)) {
        memcpy(st.buf + st.buf_used, term, sizeof(term));
        st.buf_used += sizeof(term);
        hyp_chunked_drain(&st);
    } else {
        hyp_chunked_drain(&st);
        struct iovec t_iov[1] = {{ (void *)term, sizeof(term) }};
        ssize_t tn = hyp_writev_all(fd, t_iov, 1);
        /* NOTE(review): a blocked/failed terminator write is silently
         * dropped from the count here; presumably the connection is torn
         * down elsewhere on short response — confirm caller behavior. */
        if (tn >= 0) st.bytes_written += sizeof(term);
    }

    RB_GC_GUARD(head);
    return SIZET2NUM(st.bytes_written);
}

.c_writer_available? ⇒ Boolean

Returns:

  • (Boolean)


35
36
37
38
39
40
41
42
# File 'lib/hyperion/http/response_writer.rb', line 35

# Lazily probes whether the full C fast-path surface is present and
# memoizes the answer (true OR false) in @c_writer_available. An operator
# can pre-seed that ivar through #c_writer_available= to skip the probe;
# setting it back to nil re-arms it.
def c_writer_available?
  return @c_writer_available unless @c_writer_available.nil?

  usable = respond_to?(:available?) && available?
  usable &&= respond_to?(:c_write_buffered)
  usable &&= respond_to?(:c_write_chunked)
  @c_writer_available = usable
end