 * Dispatch queues are reference counted via calls to dispatch_retain() and
 * dispatch_release(). Pending workitems submitted to a queue also hold a
 * reference to the queue until they have finished. Once all references to a
 * queue have been released, the queue will be deallocated by the system.
// Called when the last external (xref) reference to a dispatch queue is
// dropped. Crashes the client if the queue is still suspended or was never
// activated, then records the release in the queue's atomic flags.
void
_dispatch_queue_xref_dispose(dispatch_queue_t dq)
{
	uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed);

	if (unlikely(_dq_state_is_suspended(dq_state))) {
		// The crash payload is a long; on targets where long is narrower
		// than 64 bits, presumably the interesting bits live in the high
		// word of the state, so report that half instead.
		long state = (long)dq_state;
		if (sizeof(long) < sizeof(uint64_t)) {
			state = (long)(dq_state >> 32);
		}
		if (unlikely(_dq_state_is_inactive(dq_state))) {
			// Arguments for and against this assert are within 6705399
			DISPATCH_CLIENT_CRASH(state, "Release of an inactive object");
		}
		DISPATCH_CLIENT_CRASH(dq_state, "Release of a suspended object");
	}

	// Not suspended: flag that all external references are now gone.
	os_atomic_or2o(dq, dq_atomic_flags, DQF_RELEASED, relaxed);
}
// An explicit overcommit attribute only makes sense against a global root
// queue; reject it when the requested target queue has a target of its own
// (i.e. is not a root queue).
_dispatch_queue_attr_overcommit_t overcommit = dqai.dqai_overcommit;
if (overcommit != _dispatch_queue_attr_overcommit_unspecified && tq) {
	if (tq->do_targetq) {
		DISPATCH_CLIENT_CRASH(tq, "Cannot specify both overcommit and "
				"a non-global target queue");
	}
}
if (tq && dx_type(tq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE) { // Handle discrepancies between attr and target queue, attributes win if (overcommit == _dispatch_queue_attr_overcommit_unspecified) { if (tq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) { overcommit = _dispatch_queue_attr_overcommit_enabled; } else { overcommit = _dispatch_queue_attr_overcommit_disabled; } } if (qos == DISPATCH_QOS_UNSPECIFIED) { qos = _dispatch_priority_qos(tq->dq_priority); } tq = NULL; } elseif (tq && !tq->do_targetq) { // target is a pthread or runloop root queue, setting QoS or overcommit // is disallowed if (overcommit != _dispatch_queue_attr_overcommit_unspecified) { DISPATCH_CLIENT_CRASH(tq, "Cannot specify an overcommit attribute " "and use this kind of target queue"); } } else { if (overcommit == _dispatch_queue_attr_overcommit_unspecified) { // Serial queues default to overcommit! overcommit = dqai.dqai_concurrent ? _dispatch_queue_attr_overcommit_disabled : _dispatch_queue_attr_overcommit_enabled; } } if (!tq) { tq = _dispatch_get_root_queue( qos == DISPATCH_QOS_UNSPECIFIED ? DISPATCH_QOS_DEFAULT : qos, overcommit == _dispatch_queue_attr_overcommit_enabled)->_as_dq; if (unlikely(!tq)) { DISPATCH_CLIENT_CRASH(qos, "Invalid queue attribute"); } }
//
// Step 2: Initialize the queue
//
// if any of these attributes is specified, use non legacy classes
if (legacy && (dqai.dqai_inactive || dqai.dqai_autorelease_frequency)) {
	legacy = false;
}
// Only lane-type queues support dispatch_sync at all.
if (unlikely(dx_metatype(dq) != _DISPATCH_LANE_TYPE)) {
	DISPATCH_CLIENT_CRASH(0, "Queue type doesn't support dispatch_sync");
}

dispatch_lane_t dl = upcast(dq)._dl;
// Global concurrent queues and queues bound to non-dispatch threads
// always fall into the slow case, see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE
if (unlikely(!_dispatch_queue_try_reserve_sync_width(dl))) {
	return _dispatch_sync_f_slow(dl, ctxt, func, 0, dl, dc_flags);
}
// Only lane-type queues support dispatch_sync at all.
if (unlikely(dx_metatype(dq) != _DISPATCH_LANE_TYPE)) {
	DISPATCH_CLIENT_CRASH(0, "Queue type doesn't support dispatch_sync");
}

dispatch_lane_t dl = upcast(dq)._dl;
// The more correct thing to do would be to merge the qos of the thread
// that just acquired the barrier lock into the queue state.
//
// However this is too expensive for the fast path, so skip doing it.
// The chosen tradeoff is that if an enqueue on a lower priority thread
// contends with this fast path, this thread may receive a useless override.
//
// Global concurrent queues and queues bound to non-dispatch threads
// always fall into the slow case, see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE
if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(dl, tid))) {
	return _dispatch_sync_f_slow(dl, ctxt, func, DC_FLAG_BARRIER, dl,
			DC_FLAG_BARRIER | dc_flags);
}
// Push the list [_head, _tail] onto the root queue's MPSC item list, and
// poke worker threads when the push made the queue non-empty.
//
// Fix: the pasted source had `staticinlinevoid` and `structdispatch_object_s`
// (missing spaces), which cannot compile — restored the separating spaces.
static inline void
_dispatch_root_queue_push_inline(dispatch_queue_global_t dq,
		dispatch_object_t _head, dispatch_object_t _tail, int n)
{
	struct dispatch_object_s *hd = _head._do, *tl = _tail._do;

	// os_mpsc_push_list() returns true when the queue was empty, i.e. this
	// push is what made it non-empty — only then must workers be poked.
	if (unlikely(os_mpsc_push_list(os_mpsc(dq, dq_items), hd, tl, do_next))) {
		return _dispatch_root_queue_poke(dq, n, 0);
	}
}
Interestingly, `unlikely()` shows up here as well.
// Returns true when the queue was empty and the head must be set
#define os_mpsc_push_item(Q, tail, _o_next) ({ \
	os_mpsc_node_type(Q) _tl = (tail); \
	/* single-node push: the node is both head and tail of the list */ \
	os_mpsc_push_list(Q, _tl, _tl, _o_next); \
})
// Ask the kernel workqueue subsystem for `numthreads` additional worker
// threads at the given priority. Returns 0 on success, or an errno value
// (EPERM when no worker entry point has been registered yet).
int
_pthread_workqueue_addthreads(int numthreads, pthread_priority_t priority)
{
	// Without a registered worker function there is nothing for new
	// threads to run.
	if (__libdispatch_workerfunction == NULL) {
		return EPERM;
	}

#if TARGET_OS_OSX
	// <rdar://problem/37687655> Legacy simulators fail to boot
	//
	// Older sims set the deprecated _PTHREAD_PRIORITY_ROOTQUEUE_FLAG wrongly,
	// which is aliased to _PTHREAD_PRIORITY_SCHED_PRI_FLAG and that XNU
	// validates and rejects.
	//
	// As a workaround, forcefully unset this bit that cannot be set here
	// anyway.
	priority &= ~_PTHREAD_PRIORITY_SCHED_PRI_FLAG;
#endif

	int res = __workq_kernreturn(WQOPS_QUEUE_REQTHREADS, NULL, numthreads,
			(int)priority);
	if (res == -1) {
		// The syscall reports failure via errno; surface it to the caller.
		res = errno;
	}
	return res;
}