namespace AMDGPU {
enum {
  ....
  S_MOVRELS_B64 = 4043,
  ....
};
}

static bool isSMovRel(unsigned Opcode) {
  return Opcode == AMDGPU::S_MOVRELS_B32 || AMDGPU::S_MOVRELS_B64 ||
         Opcode == AMDGPU::S_MOVRELD_B32 || AMDGPU::S_MOVRELD_B64;
}
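The enum members are nonzero constants, so the second and fourth operands of `||` are truthy on their own: the `Opcode ==` part of the comparison is missing, and `isSMovRel()` returns true for any opcode. The intended check was presumably:

static bool isSMovRel(unsigned Opcode) {
  return Opcode == AMDGPU::S_MOVRELS_B32 || Opcode == AMDGPU::S_MOVRELS_B64 ||
         Opcode == AMDGPU::S_MOVRELD_B32 || Opcode == AMDGPU::S_MOVRELD_B64;
}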
static Error mapNameAndUniqueName(....) {
  ....
  size_t BytesLeft = IO.maxFieldLength();
  if (HasUniqueName) {
    ....
    if (BytesNeeded > BytesLeft) {
      size_t BytesToDrop = (BytesNeeded - BytesLeft);
      size_t DropN = std::min(N.size(), BytesToDrop / 2);
      size_t DropU = std::min(U.size(), BytesToDrop - DropN);
      ....
    }
  } else {
    size_t BytesNeeded = Name.size() + 1;
    StringRef N = Name;
    if (BytesNeeded > BytesLeft) {
      size_t BytesToDrop = std::min(N.size(), BytesToDrop);   // <=
      N = N.drop_back(BytesToDrop);
    }
    error(IO.mapStringZ(N));
  }
  ....
}
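In the marked line `BytesToDrop` appears inside its own initializer, so an uninitialized value is fed to `std::min()`. Judging by the symmetric branch above, the intended expression was presumably:

    size_t BytesToDrop = std::min(N.size(), BytesNeeded - BytesLeft);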
bool IteratorPastEndChecker::evalCall(const CallExpr *CE,
                                      CheckerContext &C) const {
  ....
  if (FD->getIdentifier() == II_find) {
    return evalFind(C, CE);
  } else if (FD->getIdentifier() == II_find_end) {
    return evalFindEnd(C, CE);
  } else if (FD->getIdentifier() == II_find_first_of) {
    return evalFindFirstOf(C, CE);
  } else if (FD->getIdentifier() == II_find_if) {        // <=
    return evalFindIf(C, CE);
  } else if (FD->getIdentifier() == II_find_if) {        // <=
    return evalFindIf(C, CE);
  } else if (FD->getIdentifier() == II_find_if_not) {
    return evalFindIfNot(C, CE);
  } else if (FD->getIdentifier() == II_upper_bound) {
    return evalUpperBound(C, CE);
  } else if (FD->getIdentifier() == II_lower_bound) {
    return evalLowerBound(C, CE);
  } else if (FD->getIdentifier() == II_search) {
    return evalSearch(C, CE);
  } else if (FD->getIdentifier() == II_search_n) {
    return evalSearchN(C, CE);
  }
  ....
}
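The two marked branches are byte-for-byte identical, so the second can never be reached. Most likely it is a copy-paste leftover; if no other handler was meant here, the chain should simply read:

  } else if (FD->getIdentifier() == II_find_if) {
    return evalFindIf(C, CE);
  } else if (FD->getIdentifier() == II_find_if_not) {
    return evalFindIfNot(C, CE);
  }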
static void
complete_mode (struct mode_data *m)
{
  ....
  if (m->cl == MODE_COMPLEX_INT || m->cl == MODE_COMPLEX_FLOAT)
    alignment = m->component->bytesize;   // <=
  else
    alignment = m->bytesize;

  m->alignment = alignment & (~alignment + 1);

  if (m->component)   // <=
    {
      m->next_cont = m->component->contained;
      m->component->contained = m;
    }
}
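`m->component` is dereferenced in the first marked line, yet checked against null only afterwards. Either the later check is redundant, or the dereference can crash on a null pointer. Assuming a null component really is possible for complex modes, a guarded sketch would be:

  if ((m->cl == MODE_COMPLEX_INT || m->cl == MODE_COMPLEX_FLOAT)
      && m->component)
    alignment = m->component->bytesize;
  else
    alignment = m->bytesize;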
void
free_sese_info (sese_info_p region)
{
  region->params.release ();
  region->loop_nest.release ();

  for (rename_map_t::iterator it = region->rename_map->begin ();
       it != region->rename_map->begin (); ++it)   // <=
    (*it).second.release ();
  ....
}
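The loop condition compares the iterator against `begin ()` instead of `end ()`, so the body never executes and the map elements are never released. Presumably:

  for (rename_map_t::iterator it = region->rename_map->begin ();
       it != region->rename_map->end (); ++it)
    (*it).second.release ();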
static bool
dw_val_equal_p (dw_val_node *a, dw_val_node *b)
{
  ....
  switch (a->val_class)
    {
    ....
    case dw_val_class_vms_delta:
      return (!strcmp (a->v.val_vms_delta.lbl1, b->v.val_vms_delta.lbl1)
              && !strcmp (a->v.val_vms_delta.lbl1, b->v.val_vms_delta.lbl1));
    ....
    }
  ....
}
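Both `strcmp` calls compare the `lbl1` fields; `lbl2` is never examined, so two deltas with equal first labels always compare equal. The second comparison was presumably meant to be:

      return (!strcmp (a->v.val_vms_delta.lbl1, b->v.val_vms_delta.lbl1)
              && !strcmp (a->v.val_vms_delta.lbl2, b->v.val_vms_delta.lbl2));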
void
initialize_sanitizer_builtins (void)
{
  ....
#define DEF_SANITIZER_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
  decl = add_builtin_function ("__builtin_" NAME, TYPE, ENUM, \
                               BUILT_IN_NORMAL, NAME, NULL_TREE); \
  set_call_expr_flags (decl, ATTRS); \
  set_builtin_decl (ENUM, decl, true);
#include "sanitizer.def"

  if ((flag_sanitize & SANITIZE_OBJECT_SIZE)
      && !builtin_decl_implicit_p (BUILT_IN_OBJECT_SIZE))
    DEF_SANITIZER_BUILTIN (BUILT_IN_OBJECT_SIZE, "object_size",
                           BT_FN_SIZE_CONST_PTR_INT,
                           ATTR_PURE_NOTHROW_LEAF_LIST)
  ....
}
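The macro expands to three statements, so under the `if` only the first `add_builtin_function` call is conditional; `set_call_expr_flags` and `set_builtin_decl` run unconditionally on a possibly stale `decl`. A common fix (a sketch, not necessarily the upstream patch) is to turn the expansion into a single statement; the trailing semicolon stays inside the macro because the call site above invokes it without one:

#define DEF_SANITIZER_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
  do { \
    decl = add_builtin_function ("__builtin_" NAME, TYPE, ENUM, \
                                 BUILT_IN_NORMAL, NAME, NULL_TREE); \
    set_call_expr_flags (decl, ATTRS); \
    set_builtin_decl (ENUM, decl, true); \
  } while (0);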
struct pfloghdr {
	u_int8_t	length;
	sa_family_t	af;
	u_int8_t	action;
	u_int8_t	reason;
	char		ifname[IFNAMSIZ];
	char		ruleset[PFLOG_RULESET_NAME_SIZE];
	u_int32_t	rulenr;
	u_int32_t	subrulenr;
	uid_t		uid;
	pid_t		pid;
	uid_t		rule_uid;
	pid_t		rule_pid;
	u_int8_t	dir;
	u_int8_t	pad[3];
};

static void
nat64lsn_log(struct pfloghdr *plog, ....)
{
	memset(plog, 0, sizeof(plog));   // <=
	plog->length = PFLOG_REAL_HDRLEN;
	plog->af = family;
	plog->action = PF_NAT;
	plog->dir = PF_IN;
	plog->rulenr = htonl(n);
	plog->subrulenr = htonl(sn);
	plog->ruleset[0] = '\0';
	strlcpy(plog->ifname, "NAT64LSN", sizeof(plog->ifname));
	ipfw_bpf_mtap2(plog, PFLOG_HDRLEN, m);
}
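`sizeof(plog)` is the size of the pointer (4 or 8 bytes), not of the header it points to, so most of the structure is left unzeroed. The intent was clearly:

	memset(plog, 0, sizeof(*plog));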
static void
dtrace_debug_output(void)
{
	....
	if (d->first < d->next) {
		char *p1 = dtrace_debug_bufr;

		count = (uintptr_t) d->next - (uintptr_t) d->first;

		for (p = d->first; p < d->next; p++)
			*p1++ = *p;
	} else if (d->next > d->first) {
		char *p1 = dtrace_debug_bufr;

		count = (uintptr_t) d->last - (uintptr_t) d->first;

		for (p = d->first; p < d->last; p++)
			*p1++ = *p;

		count += (uintptr_t) d->next - (uintptr_t) d->bufr;

		for (p = d->bufr; p < d->next; p++)
			*p1++ = *p;
	}
	....
}
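`d->next > d->first` is just `d->first < d->next` written the other way around, so the `else if` branch that copies the wrapped-around part of the ring buffer is dead code. The comparison was presumably meant to be inverted:

	} else if (d->next < d->first) {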
static int
p4_config_pmc(int cpu, int ri, struct pmc *pm)
{
	....
	int cfgflags, cpuflag;
	....
	KASSERT(cfgflags >= 0 || cfgflags <= 3,
	    ("[p4,%d] illegal cfgflags cfg=%d on cpu=%d ri=%d",
	    __LINE__, cfgflags, cpu, ri));
	....
	KASSERT(cfgflags >= 0 || cfgflags <= 3,
	    ("[p4,%d] illegal runcount cfg=%d on cpu=%d ri=%d",
	    __LINE__, cfgflags, cpu, ri));
	....
}
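`cfgflags >= 0 || cfgflags <= 3` holds for every integer, so both assertions are no-ops. Presumably `&&` was intended in each:

	KASSERT(cfgflags >= 0 && cfgflags <= 3,
	    ("[p4,%d] illegal cfgflags cfg=%d on cpu=%d ri=%d",
	    __LINE__, cfgflags, cpu, ri));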
....
U16 max_ncq_depth;
....

SCI_STATUS scif_user_parameters_set(
   SCI_CONTROLLER_HANDLE_T controller,
   SCIF_USER_PARAMETERS_T * scif_parms
)
{
   ....
   if (scif_parms->sas.max_ncq_depth < 1
       && scif_parms->sas.max_ncq_depth > 32)
      return SCI_FAILURE_INVALID_PARAMETER_VALUE;
   ....
}
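A value cannot be below 1 and above 32 at the same time, so this range check is always false and out-of-range `max_ncq_depth` values slip through. Presumably:

   if (scif_parms->sas.max_ncq_depth < 1
       || scif_parms->sas.max_ncq_depth > 32)
      return SCI_FAILURE_INVALID_PARAMETER_VALUE;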
int
mfi_tbolt_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
{
	....
	uint8_t *cdb;
	....
	/* check for inquiry commands coming from CLI */
	if (cdb[0] != 0x28 || cdb[0] != 0x2A) {
		if ((req_desc = mfi_tbolt_build_mpt_cmd(sc, cm)) == NULL) {
			device_printf(sc->mfi_dev, "Mapping from MFI "
			    "to MPT Failed \n");
			return 1;
		}
	}
	....
}
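No single byte can equal both 0x28 and 0x2A at once, so `cdb[0] != 0x28 || cdb[0] != 0x2A` is always true and the filter does nothing. The intended condition was presumably:

	if (cdb[0] != 0x28 && cdb[0] != 0x2A) {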
#define OSSA_MPI_ENC_ERR_ILLEGAL_DEK_PARAM          0x2001
#define OSSA_MPI_ERR_DEK_MANAGEMENT_DEK_UNWRAP_FAIL 0x2002

GLOBAL bit32 mpiDekManagementRsp(
  agsaRoot_t             *agRoot,
  agsaDekManagementRsp_t *pIomb
  )
{
  ....
  if (status == OSSA_MPI_ENC_ERR_ILLEGAL_DEK_PARAM ||
      OSSA_MPI_ERR_DEK_MANAGEMENT_DEK_UNWRAP_FAIL)
  {
    agEvent.eq = errorQualifier;
  }
  ....
}
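The second operand of `||` is the bare constant 0x2002 with no `status ==` comparison, so the condition is always true. Presumably:

  if (status == OSSA_MPI_ENC_ERR_ILLEGAL_DEK_PARAM ||
      status == OSSA_MPI_ERR_DEK_MANAGEMENT_DEK_UNWRAP_FAIL)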
#define A_TP_KEEP_INTVL 0x7dac

static int
sysctl_tp_timer(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	int reg = arg2;
	u_int tre;
	u_long tp_tick_us, v;
	u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;

	MPASS(reg == A_TP_RXT_MIN || reg == A_TP_RXT_MAX ||
	    reg == A_TP_PERS_MIN || reg == A_TP_PERS_MAX ||
	    reg == A_TP_KEEP_IDLE || A_TP_KEEP_INTVL ||
	    reg == A_TP_INIT_SRTT || reg == A_TP_FINWAIT2_TIMER);
	....
}
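`A_TP_KEEP_INTVL` (0x7dac, a nonzero constant) appears without the `reg ==` comparison, which makes the whole `MPASS` condition unconditionally true. Presumably:

	MPASS(reg == A_TP_RXT_MIN || reg == A_TP_RXT_MAX ||
	    reg == A_TP_PERS_MIN || reg == A_TP_PERS_MAX ||
	    reg == A_TP_KEEP_IDLE || reg == A_TP_KEEP_INTVL ||
	    reg == A_TP_INIT_SRTT || reg == A_TP_FINWAIT2_TIMER);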
static int
mly_user_command(struct mly_softc *sc, struct mly_user_command *uc)
{
	struct mly_command *mc;
	....
	if (mc->mc_data != NULL)		// <=
		free(mc->mc_data, M_DEVBUF);	// <=
	if (mc != NULL) {			// <=
		MLY_LOCK(sc);
		mly_release_command(mc);
		MLY_UNLOCK(sc);
	}
	return(error);
}
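`mc` is dereferenced via `mc->mc_data` two lines before it is checked against NULL, so the null check comes too late to protect anything. Assuming `mc` can really be NULL on this path, the free belongs inside the guarded block:

	if (mc != NULL) {
		if (mc->mc_data != NULL)
			free(mc->mc_data, M_DEVBUF);
		MLY_LOCK(sc);
		mly_release_command(mc);
		MLY_UNLOCK(sc);
	}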
static int
ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
{
	....
	if (nvlist_exists_binary(config, "mac-addr")) {
		mac = nvlist_get_binary(config, "mac-addr", NULL);
		bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
		if (nvlist_get_bool(config, "allow-set-mac"))
			vf->flags |= IXGBE_VF_CAP_MAC;
	} else
		/*
		 * If the administrator has not specified a MAC address then
		 * we must allow the VF to choose one.
		 */
		vf->flags |= IXGBE_VF_CAP_MAC;

	vf->flags = IXGBE_VF_ACTIVE;
	....
}
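The final `vf->flags = IXGBE_VF_ACTIVE;` assigns rather than ORs, erasing the `IXGBE_VF_CAP_MAC` bit that was set just above. Presumably:

	vf->flags |= IXGBE_VF_ACTIVE;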
static void
bhnd_pmu1_pllinit0(struct bhnd_pmu_softc *sc, uint32_t xtal)
{
	uint32_t pmuctrl;
	....
	/* Write XtalFreq. Set the divisor also. */
	pmuctrl = BHND_PMU_READ_4(sc, BHND_PMU_CTRL);
	pmuctrl = ~(BHND_PMU_CTRL_ILP_DIV_MASK |
	    BHND_PMU_CTRL_XTALFREQ_MASK);
	pmuctrl |= BHND_PMU_SET_BITS(((xt->fref + 127) / 128) - 1,
	    BHND_PMU_CTRL_ILP_DIV);
	pmuctrl |= BHND_PMU_SET_BITS(xt->xf, BHND_PMU_CTRL_XTALFREQ);
	....
}
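The second assignment overwrites `pmuctrl` with the inverted mask, discarding the register value read one line earlier. A read-modify-write was clearly intended:

	pmuctrl = BHND_PMU_READ_4(sc, BHND_PMU_CTRL);
	pmuctrl &= ~(BHND_PMU_CTRL_ILP_DIV_MASK |
	    BHND_PMU_CTRL_XTALFREQ_MASK);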
static int
wi_pci_resume(device_t dev)
{
	struct wi_softc	*sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;

	WI_LOCK(sc);
	if (sc->wi_bus_type != WI_BUS_PCI_NATIVE) {
		return (0);	// <=
		WI_UNLOCK(sc);	// <=
	}
	if (ic->ic_nrunning > 0)
		wi_init(sc);
	WI_UNLOCK(sc);
	return (0);
}
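`WI_UNLOCK(sc)` sits after `return (0)` and is unreachable, so this early exit leaks the lock. The two lines simply need to be swapped:

	if (sc->wi_bus_type != WI_BUS_PCI_NATIVE) {
		WI_UNLOCK(sc);
		return (0);
	}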
void	panic(const char *a) __dead2;

static int
mpr_alloc_requests(struct mpr_softc *sc)
{
	....
	else {
		panic("failed to allocate command %d\n", i);
		sc->num_reqs = i;
		break;
	}
	....
}
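Since `panic()` is declared `__dead2` (noreturn), the `sc->num_reqs = i;` and `break;` after it are unreachable, and the driver can never take the degrade-gracefully path it apparently intends. Assuming that path was the goal, the report has to be non-fatal; a sketch using the kernel's plain `printf`:

	else {
		printf("failed to allocate command %d\n", i);
		sc->num_reqs = i;
		break;
	}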
const cpp_token *
parser::next ()
{
  const cpp_token *token;
  do
    {
      token = cpp_get_token (r);
    }
  while (token->type == CPP_PADDING
	 && token->type != CPP_EOF);   // <=
  return token;
}
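A token whose type equals `CPP_PADDING` cannot simultaneously equal `CPP_EOF`, so the second test never changes the outcome. If the intent was simply to skip padding tokens, the condition reduces to:

  while (token->type == CPP_PADDING);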
unsigned HexagonEarlyIfConversion::computePhiCost(....) const {
  ....
  const MachineOperand &RA = MI.getOperand(1);
  const MachineOperand &RB = MI.getOperand(3);
  assert(RA.isReg() && RB.isReg());
  // Must have a MUX if the phi uses a subregister.
  if (RA.getSubReg() != 0 || RA.getSubReg() != 0) {
    Cost++;
    continue;
  }
  ....
}
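Both operands of `||` test `RA.getSubReg()`; `RB` is never checked, even though the comment says any subregister use needs a MUX. Presumably:

  if (RA.getSubReg() != 0 || RB.getSubReg() != 0) {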
Source: https://habr.com/ru/post/324222/